Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 15
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 82
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 72
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 48
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 34
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 89
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 566
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 445
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 468
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 178
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c | 154
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 422
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 96
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 884
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 438
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 72
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.c | 105
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h (renamed from drivers/gpu/drm/i915/i915_gem_timeline.h) | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 129
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 453
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 481
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 360
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 909
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 937
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_gpu_commands.h | 274
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 231
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 82
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ads.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ct.c | 545
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ct.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 544
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.h | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_reg.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 114
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 185
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 550
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 577
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 444
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 78
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 178
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 132
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 175
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 275
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.h | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 949
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 17
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_timeline.c) | 94
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 70
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.h | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_engine_cs.c | 58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 414
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 459
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 291
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 67
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 21
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c | 45
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.h | 28
139 files changed, 11573 insertions(+), 5525 deletions(-)
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 108d21f34777..9de8b1c51a5c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -25,7 +25,8 @@ config DRM_I915_DEBUG
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
- select DRM_DEBUG_MM_SELFTEST
+ select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
+ select DRM_DEBUG_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
@@ -89,6 +90,18 @@ config DRM_I915_SW_FENCE_CHECK_DAG
If in doubt, say "N".
+config DRM_I915_DEBUG_GUC
+ bool "Enable additional driver debugging for GuC"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will help resolve GuC related issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
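
For reference, a bool Kconfig symbol such as the new DRM_I915_DEBUG_GUC becomes a CONFIG_* macro that driver code can test at compile time. A minimal sketch of how such an option is typically consumed (the function and message are hypothetical, not part of this patch; IS_ENABLED() keeps both branches type-checked even when the option is off):

	#include <linux/kconfig.h>
	#include <linux/printk.h>

	static void guc_debug_example(void)
	{
		/* Compiled in only as dead code when the option is off. */
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC))
			pr_debug("extra GuC debugging enabled\n");
	}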
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4eee91a3a236..4c6adae23e18 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -12,12 +12,16 @@
# Note the danger in using -Wall -Wextra is that when CI updates gcc we
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
-subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y := -Wall -Wextra -Wvla
subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+# clang warnings
+subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
+subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
@@ -43,7 +47,8 @@ i915-y := i915_drv.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
- intel_runtime_pm.o
+ intel_runtime_pm.o \
+ intel_workarounds.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
@@ -66,11 +71,11 @@ i915-y += i915_cmd_parser.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
- i915_gem_timeline.o \
i915_gem_userptr.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
+ i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
@@ -79,7 +84,8 @@ i915-y += i915_cmd_parser.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
- intel_uncore.o
+ intel_uncore.o \
+ intel_wopcm.o
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
@@ -152,7 +158,8 @@ i915-y += dvo_ch7017.o \
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
- selftests/i915_selftest.o
+ selftests/i915_selftest.o \
+ selftests/igt_flush_test.o
# virtual gpu code
i915-y += i915_vgpu.o
@@ -171,7 +178,8 @@ i915-y += i915_perf.o \
i915_oa_glk.o \
i915_oa_cflgt2.o \
i915_oa_cflgt3.o \
- i915_oa_cnl.o
+ i915_oa_cnl.o \
+ i915_oa_icl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
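
The new -Wvla flag added above makes the build warn on variable-length arrays, which the kernel avoids because their stack usage is unbounded. A one-function illustration of the construct it flags (hypothetical example, not from this patch):

	static int sum_squares(int n, const int *v)
	{
		int tmp[n];		/* VLA: flagged by -Wvla */
		int i, total = 0;

		for (i = 0; i < n; i++)
			tmp[i] = v[i] * v[i];
		for (i = 0; i < n; i++)
			total += tmp[i];
		return total;
	}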
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d85939bd7b47..b51c05d03f14 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -813,15 +813,31 @@ static inline bool is_force_nonpriv_mmio(unsigned int offset)
}
static int force_nonpriv_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index)
+ unsigned int offset, unsigned int index, char *cmd)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- unsigned int data = cmd_val(s, index + 1);
+ unsigned int data;
+ u32 ring_base;
+ u32 nopid;
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (!strcmp(cmd, "lri"))
+ data = cmd_val(s, index + 1);
+ else {
+ gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
+ offset, cmd);
+ return -EINVAL;
+ }
+
+ ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
- if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+ if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
+ data != nopid) {
gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
offset, data);
- return -EPERM;
+ patch_value(s, cmd_ptr(s, index), nopid);
+ return 0;
}
return 0;
}
@@ -869,7 +885,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EINVAL;
if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index))
+ force_nonpriv_reg_handler(s, offset, index, cmd))
return -EPERM;
if (offset == i915_mmio_reg_offset(DERRMR) ||
@@ -1604,7 +1620,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8))
+ if (cmd_val(s, 0) & (1 << 8) &&
+ !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
return 0;
}
return 1;
@@ -1618,6 +1635,8 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
+ struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
+ s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
*bb_size = 0;
@@ -1629,18 +1648,22 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
- cmd, get_opcode(cmd, s->ring_id));
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
return -EBADRQC;
}
do {
- if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ if (copy_gma_to_hva(s->vgpu, mm,
gma, gma + 4, &cmd) < 0)
return -EFAULT;
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
- cmd, get_opcode(cmd, s->ring_id));
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
return -EBADRQC;
}
@@ -1666,6 +1689,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
unsigned long gma = 0;
unsigned long bb_size;
int ret = 0;
+ struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
+ s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
+ unsigned long gma_start_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1680,8 +1706,24 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (!bb)
return -ENOMEM;
+ bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
+
+ /* gma_start_offset stores the batch buffer's start gma's
+ * offset relative to the page boundary. So for a non-privileged
+ * batch buffer, the shadowed gem object holds exactly the same
+ * page layout as the original gem object. This is for the
+ * convenience of replacing the whole non-privileged batch buffer
+ * page with this shadowed one in the PPGTT at the same gma
+ * address (this replacement is not implemented yet, but may be
+ * necessary in the future).
+ * For a privileged batch buffer, we just change the start gma
+ * address to that of the shadowed page.
+ */
+ if (bb->ppgtt)
+ gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
+
bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size, PAGE_SIZE));
+ roundup(bb_size + gma_start_offset, PAGE_SIZE));
if (IS_ERR(bb->obj)) {
ret = PTR_ERR(bb->obj);
goto err_free_bb;
@@ -1702,9 +1744,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
bb->clflush &= ~CLFLUSH_BEFORE;
}
- ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ ret = copy_gma_to_hva(s->vgpu, mm,
gma, gma + bb_size,
- bb->va);
+ bb->va + gma_start_offset);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
ret = -EFAULT;
@@ -1730,7 +1772,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = bb->va;
+ s->ip_va = bb->va + gma_start_offset;
s->ip_gma = gma;
return 0;
err_unmap:
@@ -2469,15 +2511,18 @@ static int cmd_parser_exec(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
- cmd, get_opcode(cmd, s->ring_id));
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
return -EBADRQC;
}
s->info = info;
trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
- cmd_length(s), s->buf_type);
+ cmd_length(s), s->buf_type, s->buf_addr_type,
+ s->workload, info->name);
if (info->handler) {
ret = info->handler(s);
@@ -2864,6 +2909,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
if (info) {
gvt_err("%s %s duplicated\n", e->info->name,
info->name);
+ kfree(e);
return -EEXIST;
}
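
In condensed form, the reworked FORCE_NONPRIV handling above stops failing the whole submission on a non-whitelisted LRI value and instead patches the offending write so it targets the ring's NOPID scratch register. A sketch using the names from this patch (the wrapper function itself is hypothetical):

	/* Condensed sketch of the new force_nonpriv LRI flow: a value
	 * outside the whitelist is no longer fatal (-EPERM); it is
	 * rewritten in place to NOPID so the guest's batch keeps
	 * executing without touching a privileged register.
	 */
	static int sketch_lri_handler(struct parser_exec_state *s,
				      unsigned int index, u32 nopid)
	{
		u32 data = cmd_val(s, index + 1);

		if (!intel_gvt_in_force_nonpriv_whitelist(s->vgpu->gvt, data) &&
		    data != nopid)
			patch_value(s, cmd_ptr(s, index), nopid); /* neutralize */

		return 0;	/* never fails the submission any more */
	}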
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 32a66dfdf112..2ec89bcb59f1 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -122,18 +122,69 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff);
-static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
+static int
+vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
- return single_open(file, vgpu_mmio_diff_show, inode->i_private);
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+ *val = vgpu->scan_nonprivbb;
+ return 0;
}
-static const struct file_operations vgpu_mmio_diff_fops = {
- .open = vgpu_mmio_diff_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+/*
+ * Set/unset bit engine_id of vgpu->scan_nonprivbb to turn on/off
+ * scanning of non-privileged batch buffers on that engine. E.g. if
+ * vgpu->scan_nonprivbb = 3, non-privileged batch buffers will be
+ * scanned on engines 0 and 1.
+ */
+static int
+vgpu_scan_nonprivbb_set(void *data, u64 val)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum intel_engine_id id;
+ char buf[128], *s;
+ int len;
+
+ val &= (1 << I915_NUM_ENGINES) - 1;
+
+ if (vgpu->scan_nonprivbb == val)
+ return 0;
+
+ if (!val)
+ goto done;
+
+ len = sprintf(buf,
+ "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
+ vgpu->id);
+
+ s = buf + len;
+
+ for (id = 0; id < I915_NUM_ENGINES; id++) {
+ struct intel_engine_cs *engine;
+
+ engine = dev_priv->engine[id];
+ if (engine && (val & (1 << id))) {
+ len = snprintf(s, 4, "%d, ", engine->id);
+ s += len;
+ } else
+ val &= ~(1 << id);
+ }
+
+ if (val)
+ sprintf(s, "low performance expected.");
+
+ pr_warn("%s\n", buf);
+
+done:
+ vgpu->scan_nonprivbb = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
+ vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
+ "0x%llx\n");
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
@@ -162,6 +213,11 @@ int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
if (!ent)
return -ENOMEM;
+ ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
+ vgpu, &vgpu_scan_nonprivbb_fops);
+ if (!ent)
+ return -ENOMEM;
+
return 0;
}
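
A user-space sketch of driving the new knob; the file accepts a bitmask via the DEFINE_SIMPLE_ATTRIBUTE "0x%llx" parser, so hex input works. The debugfs path is an assumption here — it depends on where debugfs is mounted and on the vGPU's directory name:

	/* Hypothetical usage: enable non-privileged batch buffer scanning
	 * on engines 0 and 1 (bitmask 0x3). Path is an assumption.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/gvt/vgpu1/scan_nonprivbb",
			      O_WRONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0x3", 3) != 3)	/* engines 0 and 1 */
			perror("write");
		close(fd);
		return 0;
	}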
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index b46b86892d58..ea7c1c525b8c 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -67,7 +67,7 @@
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
-#define AUX_BURST_SIZE 16
+#define AUX_BURST_SIZE 20
/* DPCD addresses */
#define DPCD_REV 0x000
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 78e55aafc8bc..23296547da95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1585,8 +1585,9 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
mm->type = INTEL_GVT_MM_GGTT;
nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
- mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
- vgpu->gvt->device_info.gtt_entry_size);
+ mm->ggtt_mm.virtual_ggtt =
+ vzalloc(array_size(nr_entries,
+ vgpu->gvt->device_info.gtt_entry_size));
if (!mm->ggtt_mm.virtual_ggtt) {
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
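
The gtt.c change above is part of a wider move to overflow-safe allocation helpers (the vgpu.c and mmio.c hunks later in this diff do the same with kcalloc() and array_size()). A minimal sketch of the pattern, using array_size() from <linux/overflow.h>; the wrapper function is hypothetical:

	#include <linux/overflow.h>
	#include <linux/vmalloc.h>

	/* array_size(n, size) computes n * size but saturates to SIZE_MAX
	 * on overflow, so the allocation fails cleanly instead of silently
	 * returning a too-small buffer when nr_entries is huge.
	 */
	static void *alloc_entries(size_t nr_entries, size_t entry_size)
	{
		return vzalloc(array_size(nr_entries, entry_size));
	}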
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index efacd8abbedc..05d15a095310 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -99,7 +99,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
void *sreg;
- bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -226,6 +225,7 @@ struct intel_vgpu {
struct completion vblank_done;
+ u32 scan_nonprivbb;
};
/* validating GM healthy status*/
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a33c1c3e4a21..bcbc47a88a70 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -191,6 +191,8 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int max_fence = vgpu_fence_sz(vgpu);
if (fence_num >= max_fence) {
+ gvt_vgpu_err("access oob fence reg %d/%d\n",
+ fence_num, max_fence);
/* When guest access oob fence regs without access
* pv_info first, we treat guest not supporting GVT,
@@ -200,11 +202,6 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
enter_failsafe_mode(vgpu,
GVT_FAILSAFE_UNSUPPORTED_GUEST);
- if (!vgpu->mmio.disable_warn_untrack) {
- gvt_vgpu_err("found oob fence register access\n");
- gvt_vgpu_err("total fence %d, access fence %d\n",
- max_fence, fence_num);
- }
memset(p_data, 0, bytes);
return -EINVAL;
}
@@ -477,22 +474,28 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = *(u32 *)p_data;
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ u32 ring_base;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret = -EINVAL;
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
- gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, offset, bytes);
+ if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
+ gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, ring_id, offset, bytes);
return ret;
}
- if (in_whitelist(reg_nonpriv)) {
+ ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+ if (in_whitelist(reg_nonpriv) ||
+ reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
- } else {
- gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
- vgpu->id, reg_nonpriv);
- }
- return ret;
+ } else
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
+ vgpu->id, reg_nonpriv, offset);
+
+ return 0;
}
static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
@@ -900,11 +903,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
}
/*
- * Write request format: (command + address) occupies
- * 3 bytes, followed by (len + 1) bytes of data.
+ * Write request format: Header (command + address + size) occupies
+ * 4 bytes, followed by (len + 1) bytes of data. See details at
+ * intel_dp_aux_transfer().
*/
- if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+ if ((len + 1 + 4) > AUX_BURST_SIZE) {
+ gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
return -EINVAL;
+ }
/* unpack data from vreg to buf */
for (t = 0; t < 4; t++) {
@@ -968,8 +974,10 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
/*
* Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
*/
- if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+ if ((len + 2) > AUX_BURST_SIZE) {
+ gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
return -EINVAL;
+ }
/* read from virtual DPCD to vreg */
/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
@@ -3092,9 +3100,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
*/
mmio_info = find_mmio_info(gvt, offset);
if (!mmio_info) {
- if (!vgpu->mmio.disable_warn_untrack)
- gvt_vgpu_err("untracked MMIO %08x len %d\n",
- offset, bytes);
+ gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
goto default_rw;
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1466d8769ec9..df4e4a07db3d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -123,6 +123,12 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
return -EINVAL;
}
+ if (!pfn_valid(pfn)) {
+ gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+ vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+ return -EINVAL;
+ }
+
/* Setup DMA mapping. */
page = pfn_to_page(pfn);
*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
@@ -583,6 +589,17 @@ out:
return ret;
}
+static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
+{
+ struct eventfd_ctx *trigger;
+
+ trigger = vgpu->vdev.msi_trigger;
+ if (trigger) {
+ eventfd_ctx_put(trigger);
+ vgpu->vdev.msi_trigger = NULL;
+ }
+}
+
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
struct kvmgt_guest_info *info;
@@ -607,6 +624,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
+ intel_vgpu_release_msi_eventfd_ctx(vgpu);
+
vgpu->vdev.kvm = NULL;
vgpu->handle = 0;
}
@@ -987,7 +1006,8 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
return PTR_ERR(trigger);
}
vgpu->vdev.msi_trigger = trigger;
- }
+ } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
+ intel_vgpu_release_msi_eventfd_ctx(vgpu);
return 0;
}
@@ -1592,6 +1612,18 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+ /*
+ * When the guest is powered off, msi_trigger is set to NULL, but the
+ * vgpu's config and mmio registers are not restored to their defaults
+ * during guest poweroff. If this vgpu is reused by the next VM, its
+ * pipe may still be enabled, and once the vgpu is active it will get
+ * vblank interrupt injection requests. But msi_trigger stays NULL
+ * until MSI is enabled by the guest, so if msi_trigger is NULL, return
+ * success without injecting an interrupt into the guest.
+ */
+ if (vgpu->vdev.msi_trigger == NULL)
+ return 0;
+
if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
return 0;
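
The msi_trigger lifecycle above reduces to a small pattern: take the eventfd reference once, drop it on release or explicit disable, and treat a NULL trigger as "nothing to inject yet". A sketch of the guarded injection path under the names used in this patch (the error value on a failed signal is an assumption, as that branch is not shown in this excerpt):

	static int inject_msi_sketch(struct intel_vgpu *vgpu)
	{
		if (!vgpu->vdev.msi_trigger)
			return 0;		/* no eventfd to signal yet */

		if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
			return 0;		/* one event queued */

		return -EFAULT;			/* hypothetical error path */
	}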
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 11b71b33f1c0..b31eb36fc102 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,8 +244,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
-
- vgpu->mmio.disable_warn_untrack = false;
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
/* only reset the engine related, so starting with 0x44200
@@ -269,7 +267,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
if (!vgpu->mmio.vreg)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index a5bac83d53a9..0f949554d118 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -448,7 +448,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
{
- u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+ u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 75b7bc7b344c..d053cbe1dc94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -53,7 +53,6 @@ struct vgpu_sched_data {
bool active;
ktime_t sched_in_time;
- ktime_t sched_out_time;
ktime_t sched_time;
ktime_t left_ts;
ktime_t allocated_ts;
@@ -66,17 +65,22 @@ struct gvt_sched_data {
struct hrtimer timer;
unsigned long period;
struct list_head lru_runq_head;
+ ktime_t expire_time;
};
-static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
ktime_t delta_ts;
- struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+ struct vgpu_sched_data *vgpu_data;
- delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+ if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
+ return;
- vgpu_data->sched_time += delta_ts;
- vgpu_data->left_ts -= delta_ts;
+ vgpu_data = vgpu->sched_data;
+ delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
+ vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
+ vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
+ vgpu_data->sched_in_time = cur_time;
}
#define GVT_TS_BALANCE_PERIOD_MS 100
@@ -150,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
}
cur_time = ktime_get();
- if (scheduler->current_vgpu) {
- vgpu_data = scheduler->current_vgpu->sched_data;
- vgpu_data->sched_out_time = cur_time;
- vgpu_update_timeslice(scheduler->current_vgpu);
- }
+ vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
vgpu_data = scheduler->next_vgpu->sched_data;
vgpu_data->sched_in_time = cur_time;
@@ -226,17 +226,22 @@ out:
void intel_gvt_schedule(struct intel_gvt *gvt)
{
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
- static uint64_t timer_check;
+ ktime_t cur_time;
mutex_lock(&gvt->lock);
+ cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request)) {
- if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ if (cur_time >= sched_data->expire_time) {
gvt_balance_timeslice(sched_data);
+ sched_data->expire_time = ktime_add_ms(
+ cur_time, GVT_TS_BALANCE_PERIOD_MS);
+ }
}
clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
+ vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
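
The sched_policy.c rework above replaces a per-invocation tick counter with wall-clock expiry, so the core of the accounting is plain ktime arithmetic. A minimal sketch using the fields from this patch (helper name hypothetical):

	/* Charge the elapsed slice to the running vgpu and restart the
	 * slice from "now"; rearming the balance period by absolute time
	 * stays correct even if the service routine runs late.
	 */
	static void account_timeslice(struct vgpu_sched_data *d, ktime_t now)
	{
		ktime_t delta = ktime_sub(now, d->sched_in_time);

		d->sched_time = ktime_add(d->sched_time, delta);
		d->left_ts = ktime_sub(d->left_ts, delta);
		d->sched_in_time = now;
	}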
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..c2d183b91500 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -58,7 +58,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
+ shadow_ctx->__engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -97,7 +97,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (!workload || !reg_state || workload->ring_id != RCS)
+ if (workload->ring_id != RCS)
return;
if (save) {
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
+ shadow_ctx->__engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -283,7 +283,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -389,7 +389,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
- ring = engine->context_pin(engine, shadow_ctx);
+ ring = intel_context_pin(shadow_ctx, engine);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
@@ -403,7 +403,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
return 0;
err_unpin:
- engine->context_unpin(engine, shadow_ctx);
+ intel_context_unpin(shadow_ctx, engine);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
@@ -437,7 +437,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
return 0;
err_unpin:
- engine->context_unpin(engine, shadow_ctx);
+ intel_context_unpin(shadow_ctx, engine);
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
}
@@ -452,12 +452,6 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
- bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
- if (IS_ERR(bb->vma)) {
- ret = PTR_ERR(bb->vma);
- goto err;
- }
-
/* For a privileged batch buffer that is not wa_ctx, bb_start_cmd_va
 * is only updated into ring_scan_buffer, not the real ring address
 * allocated in the later copy_workload_to_ring_buffer. Please note
@@ -469,25 +463,53 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+ bb->bb_offset;
- /* relocate shadow batch buffer */
- bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
- if (gmadr_bytes == 8)
- bb->bb_start_cmd_va[2] = 0;
+ if (bb->ppgtt) {
+ /* For a non-priv bb, scan & shadow is only for
+ * debugging purposes, so the content of the shadow
+ * bb is the same as the original bb. Therefore,
+ * rather than switching to the shadow bb's gma
+ * address, we directly use the original batch
+ * buffer's gma address and send the original bb
+ * to the hardware.
+ */
+ if (bb->clflush & CLFLUSH_AFTER) {
+ drm_clflush_virt_range(bb->va,
+ bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_AFTER;
+ }
+ i915_gem_obj_finish_shmem_access(bb->obj);
+ bb->accessing = false;
+
+ } else {
+ bb->vma = i915_gem_object_ggtt_pin(bb->obj,
+ NULL, 0, 0, 0);
+ if (IS_ERR(bb->vma)) {
+ ret = PTR_ERR(bb->vma);
+ goto err;
+ }
- /* No one is going to touch shadow bb from now on. */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va, bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
+ /* relocate shadow batch buffer */
+ bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+ if (gmadr_bytes == 8)
+ bb->bb_start_cmd_va[2] = 0;
- ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
- if (ret)
- goto err;
+ /* No one is going to touch shadow bb from now on. */
+ if (bb->clflush & CLFLUSH_AFTER) {
+ drm_clflush_virt_range(bb->va,
+ bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_AFTER;
+ }
- i915_gem_obj_finish_shmem_access(bb->obj);
- bb->accessing = false;
+ ret = i915_gem_object_set_to_gtt_domain(bb->obj,
+ false);
+ if (ret)
+ goto err;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ i915_gem_obj_finish_shmem_access(bb->obj);
+ bb->accessing = false;
+
+ i915_vma_move_to_active(bb->vma, workload->req, 0);
+ }
}
return 0;
err:
@@ -504,7 +526,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
struct intel_vgpu_submission *s = &workload->vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
+ shadow_ctx->__engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -666,7 +688,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ret = prepare_workload(workload);
if (ret) {
- engine->context_unpin(engine, shadow_ctx);
+ intel_context_unpin(shadow_ctx, engine);
goto out;
}
@@ -749,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
+ shadow_ctx->__engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
@@ -876,7 +898,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
- engine->context_unpin(engine, s->shadow_ctx);
+ intel_context_unpin(s->shadow_ctx, engine);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1134,9 +1156,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
- if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
- s->shadow_ctx->priority = INT_MAX;
-
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 486ed57a4ad1..6c644782193e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -125,6 +125,7 @@ struct intel_vgpu_shadow_bb {
unsigned int clflush;
bool accessing;
unsigned long bb_offset;
+ bool ppgtt;
};
#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 82093f1e8612..1fd64202d74e 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -224,19 +224,25 @@ TRACE_EVENT(oos_sync,
TP_printk("%s", __entry->buf)
);
+#define GVT_CMD_STR_LEN 40
TRACE_EVENT(gvt_command,
- TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
- u32 buf_type),
+ TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
+ u32 cmd_len, u32 buf_type, u32 buf_addr_type,
+ void *workload, char *cmd_name),
- TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
+ TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
+ buf_addr_type, workload, cmd_name),
TP_STRUCT__entry(
__field(u8, vgpu_id)
__field(u8, ring_id)
__field(u32, ip_gma)
__field(u32, buf_type)
+ __field(u32, buf_addr_type)
__field(u32, cmd_len)
+ __field(void*, workload)
__dynamic_array(u32, raw_cmd, cmd_len)
+ __array(char, cmd_name, GVT_CMD_STR_LEN)
),
TP_fast_assign(
@@ -244,17 +250,25 @@ TRACE_EVENT(gvt_command,
__entry->ring_id = ring_id;
__entry->ip_gma = ip_gma;
__entry->buf_type = buf_type;
+ __entry->buf_addr_type = buf_addr_type;
__entry->cmd_len = cmd_len;
+ __entry->workload = workload;
+ snprintf(__entry->cmd_name, GVT_CMD_STR_LEN, "%s", cmd_name);
memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
),
- TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
+ TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
__entry->vgpu_id,
__entry->ring_id,
+ __entry->buf_addr_type,
__entry->buf_type,
__entry->ip_gma,
- __print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
+ __entry->cmd_name,
+ __entry->cmd_len,
+ __print_array(__get_dynamic_array(raw_cmd),
+ __entry->cmd_len, 4),
+ __entry->workload)
);
#define GVT_TEMP_STR_LEN 10
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 2e0a02a80fe4..572a18c2bfb5 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -121,7 +121,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
- gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 89f7ff2c652e..13e7b9e4a6e6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
-static int per_file_ctx_stats(int id, void *ptr, void *data)
+static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
struct i915_gem_context *ctx = ptr;
- int n;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, ctx->i915, id) {
+ struct intel_context *ce = to_intel_context(ctx, engine);
- for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
- if (ctx->engine[n].state)
- per_file_stats(0, ctx->engine[n].state->obj, data);
- if (ctx->engine[n].ring)
- per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
+ if (ce->state)
+ per_file_stats(0, ce->state->obj, data);
+ if (ce->ring)
+ per_file_stats(0, ce->ring->vma->obj, data);
}
return 0;
@@ -1215,20 +1218,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1340,10 +1343,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct rb_node *rb;
seq_printf(m, "%s:\n", engine->name);
- seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
+ seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine),
- engine->timeline->inflight_seqnos);
+ intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
@@ -1796,9 +1798,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int ret = 0;
- int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
+ int gpu_freq, ia_freq;
+ int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
@@ -1809,13 +1811,12 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
/* Convert GT frequency to 50 HZ units */
- min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
- max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
- } else {
- min_gpu_freq = rps->min_freq_softlimit;
- max_gpu_freq = rps->max_freq_softlimit;
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
}
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1828,7 +1829,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
(IS_GEN9_BC(dev_priv) ||
- IS_CANNONLAKE(dev_priv) ?
+ INTEL_GEN(dev_priv) >= 10 ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -1923,8 +1924,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
- ring->space, ring->head, ring->tail);
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
+ ring->space, ring->head, ring->tail, ring->emit);
}
static int i915_context_status(struct seq_file *m, void *unused)
@@ -1961,7 +1962,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce =
+ to_intel_context(ctx, engine);
seq_printf(m, "%s: ", engine->name);
if (ce->state)
@@ -2326,30 +2328,45 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_guc_log_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
{
- struct intel_guc *guc = &dev_priv->guc;
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return "ISR";
+ case GUC_DPC_LOG_BUFFER:
+ return "DPC";
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return "CRASH";
+ default:
+ MISSING_CASE(type);
+ }
- seq_puts(m, "\nGuC logging stats:\n");
+ return "";
+}
- seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
- guc->log.flush_count[GUC_ISR_LOG_BUFFER],
- guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+static void i915_guc_log_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct intel_guc_log *log = &dev_priv->guc.log;
+ enum guc_log_buffer_type type;
- seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
- guc->log.flush_count[GUC_DPC_LOG_BUFFER],
- guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+ if (!intel_guc_log_relay_enabled(log)) {
+ seq_puts(m, "GuC log relay disabled\n");
+ return;
+ }
- seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
- guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
- guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+ seq_puts(m, "GuC logging stats:\n");
- seq_printf(m, "\tTotal flush interrupt count: %u\n",
- guc->log.flush_interrupt_count);
+ seq_printf(m, "\tRelay full count: %u\n",
+ log->relay.full_count);
- seq_printf(m, "\tCapture miss count: %u\n",
- guc->log.capture_miss_count);
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
+ stringify_guc_log_type(type),
+ log->stats[type].flush,
+ log->stats[type].sampled_overflow);
+ }
}
static void i915_guc_client_info(struct seq_file *m,
@@ -2379,14 +2396,19 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!USES_GUC(dev_priv))
return -ENODEV;
+ i915_guc_log_info(m, dev_priv);
+
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return 0;
+
GEM_BUG_ON(!guc->execbuf_client);
- seq_printf(m, "Doorbell map:\n");
+ seq_printf(m, "\nDoorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
@@ -2396,8 +2418,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
i915_guc_client_info(m, dev_priv, guc->preempt_client);
}
- i915_guc_log_info(m, dev_priv);
-
/* Add more as required ... */
return 0;
@@ -2496,35 +2516,73 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
-static int i915_guc_log_control_get(void *data, u64 *val)
+static int i915_guc_log_level_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- if (!HAS_GUC(dev_priv))
+ if (!USES_GUC(dev_priv))
return -ENODEV;
- if (!dev_priv->guc.log.vma)
- return -EINVAL;
-
- *val = i915_modparams.guc_log_level;
+ *val = intel_guc_log_level_get(&dev_priv->guc.log);
return 0;
}
-static int i915_guc_log_control_set(void *data, u64 val)
+static int i915_guc_log_level_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- if (!HAS_GUC(dev_priv))
+ if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_control(&dev_priv->guc, val);
+ return intel_guc_log_level_set(&dev_priv->guc.log, val);
}
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
- i915_guc_log_control_get, i915_guc_log_control_set,
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
+ i915_guc_log_level_get, i915_guc_log_level_set,
"%lld\n");
+static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!USES_GUC(dev_priv))
+ return -ENODEV;
+
+ file->private_data = &dev_priv->guc.log;
+
+ return intel_guc_log_relay_open(&dev_priv->guc.log);
+}
+
+static ssize_t
+i915_guc_log_relay_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct intel_guc_log *log = filp->private_data;
+
+ intel_guc_log_relay_flush(log);
+
+ return cnt;
+}
+
+static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ intel_guc_log_relay_close(&dev_priv->guc.log);
+
+ return 0;
+}
+
+static const struct file_operations i915_guc_log_relay_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_guc_log_relay_open,
+ .write = i915_guc_log_relay_write,
+ .release = i915_guc_log_relay_release,
+};
+
static const char *psr2_live_status(u32 val)
{
static const char * const live_status[] = {
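
A user-space sketch of the new relay interface above: per i915_guc_log_relay_write(), the written content is ignored and any write simply forces a flush of the GuC log to the relay channel. The debugfs file name below is an assumption — it is registered elsewhere in this series:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_guc_log_relay",
			      O_WRONLY);
		if (fd >= 0) {
			write(fd, "1", 1);	/* content ignored; write == flush */
			close(fd);
		}
		return 0;
	}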
@@ -2548,6 +2606,26 @@ static const char *psr2_live_status(u32 val)
return "unknown";
}
+static const char *psr_sink_status(u8 val)
+{
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error"
+ };
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ return sink_status[val];
+
+ return "unknown";
+}
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2569,14 +2647,13 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
- seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support)
+ if (dev_priv->psr.psr2_enabled)
enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
else
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
@@ -2624,18 +2701,67 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_support) {
+ if (dev_priv->psr.psr2_enabled) {
u32 psr2 = I915_READ(EDP_PSR2_STATUS);
seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
psr2, psr2_live_status(psr2));
}
+
+ if (dev_priv->psr.enabled) {
+ struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
+ psr_sink_status(val));
+ }
mutex_unlock(&dev_priv->psr.lock);
+ if (READ_ONCE(dev_priv->psr.debug)) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ dev_priv->psr.last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n",
+ dev_priv->psr.last_exit);
+ }
+
intel_runtime_pm_put(dev_priv);
return 0;
}
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
+
+ intel_runtime_pm_get(dev_priv);
+ intel_psr_irq_control(dev_priv, !!val);
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
static int i915_sink_crc(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3231,7 +3357,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
@@ -3241,6 +3368,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
}
drm_modeset_unlock_all(dev);
@@ -3249,24 +3398,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- int i;
- int ret;
- struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
- enum intel_engine_id id;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ int i;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine(engine, dev_priv, id)
- seq_printf(m, "HW whitelist count for %s: %d\n",
- engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
@@ -3282,7 +3420,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
}
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -3567,7 +3704,8 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
@@ -3601,10 +3739,8 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
static int i915_displayport_test_active_open(struct inode *inode,
struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
-
return single_open(file, i915_displayport_test_active_show,
- &dev_priv->drm);
+ inode->i_private);
}
static const struct file_operations i915_displayport_test_active_fops = {
@@ -3618,7 +3754,8 @@ static const struct file_operations i915_displayport_test_active_fops = {
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
@@ -3657,26 +3794,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
return 0;
}
-static int i915_displayport_test_data_open(struct inode *inode,
- struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- return single_open(file, i915_displayport_test_data_show,
- &dev_priv->drm);
-}
-
-static const struct file_operations i915_displayport_test_data_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_data_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
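For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the same open wrapper and file_operations that the removed block spelled out by hand. Roughly, the expansion is (a sketch of the generic macro, not code from this patch):

	static int i915_displayport_test_data_open(struct inode *inode,
						   struct file *file)
	{
		/* pass inode->i_private straight through as seq_file private data */
		return single_open(file, i915_displayport_test_data_show,
				   inode->i_private);
	}

	static const struct file_operations i915_displayport_test_data_fops = {
		.owner = THIS_MODULE,
		.open = i915_displayport_test_data_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};

Because the generated open passes inode->i_private through unmodified, the _show() callbacks converted in this patch now take dev_priv (the value stored in i_private) rather than a drm_device, which is exactly what the hunks above change.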
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
@@ -3703,23 +3826,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
return 0;
}
-
-static int i915_displayport_test_type_open(struct inode *inode,
- struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- return single_open(file, i915_displayport_test_type_show,
- &dev_priv->drm);
-}
-
-static const struct file_operations i915_displayport_test_type_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_type_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
@@ -3987,8 +4094,8 @@ i915_wedged_set(void *data, u64 val)
engine->hangcheck.stalled = true;
}
- i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
- val);
+ i915_handle_error(i915, val, I915_ERROR_CAPTURE,
+ "Manually set wedged engine mask = %llx", val);
wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
@@ -4152,119 +4259,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
"0x%08llx\n");
static int
-i915_max_freq_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
- return 0;
-}
-
-static int
-i915_max_freq_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 hw_max, hw_min;
- int ret;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
-
- ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
- if (ret)
- return ret;
-
- /*
- * Turbo will still be enabled, but won't go above the set value.
- */
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = rps->max_freq;
- hw_min = rps->min_freq;
-
- if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- return -EINVAL;
- }
-
- rps->max_freq_softlimit = val;
-
- if (intel_set_rps(dev_priv, val))
- DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
-
- mutex_unlock(&dev_priv->pcu_lock);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
- i915_max_freq_get, i915_max_freq_set,
- "%llu\n");
-
-static int
-i915_min_freq_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
- return 0;
-}
-
-static int
-i915_min_freq_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 hw_max, hw_min;
- int ret;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
-
- ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
- if (ret)
- return ret;
-
- /*
- * Turbo will still be enabled, but won't go below the set value.
- */
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = rps->max_freq;
- hw_min = rps->min_freq;
-
- if (val < hw_min ||
- val > hw_max || val > rps->max_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- return -EINVAL;
- }
-
- rps->min_freq_softlimit = val;
-
- if (intel_set_rps(dev_priv, val))
- DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
-
- mutex_unlock(&dev_priv->pcu_lock);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
- i915_min_freq_get, i915_min_freq_set,
- "%llu\n");
-
-static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
@@ -4316,9 +4310,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
- int ss_max = 2;
+#define SS_MAX 2
+ const int ss_max = SS_MAX;
+ u32 sig1[SS_MAX], sig2[SS_MAX];
int ss;
- u32 sig1[ss_max], sig2[ss_max];
sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
@@ -4342,15 +4337,16 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->eu_per_subslice = max_t(unsigned int,
sseu->eu_per_subslice, eu_cnt);
}
+#undef SS_MAX
}
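A plausible motivation for this pattern (the patch itself does not state it): u32 sig1[ss_max] with a non-constant bound is a variable-length array, which the kernel is phasing out (-Wvla). Using a compile-time SS_MAX keeps the stack footprint fixed while the loop bounds remain runtime values:

	u32 sig1[ss_max];	/* VLA: size decided at run time */
	u32 sig1[SS_MAX];	/* fixed upper bound, known at compile time */

The same SS_MAX define/#undef treatment is applied to the gen10 and gen9 variants below.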
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
+#define SS_MAX 6
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
- u32 s_reg[info->sseu.max_slices];
- u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
for (s = 0; s < info->sseu.max_slices; s++) {
/*
@@ -4397,15 +4393,16 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
eu_cnt);
}
}
+#undef SS_MAX
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
+#define SS_MAX 3
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
- u32 s_reg[info->sseu.max_slices];
- u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
for (s = 0; s < info->sseu.max_slices; s++) {
s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
@@ -4452,6 +4449,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
eu_cnt);
}
}
+#undef SS_MAX
}
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
@@ -4703,6 +4701,67 @@ static int i915_drrs_ctl_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+ bool reset;
+
+ ret = kstrtobool_from_user(ubuf, cnt, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return cnt;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct drm_crtc_commit *commit;
+ struct intel_crtc_state *crtc_state;
+
+ ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+ commit = crtc_state->base.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (!ret)
+ ret = wait_for_completion_interruptible(&commit->flip_done);
+ }
+
+ if (!ret && crtc_state->base.active) {
+ DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+
+ intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_fbc_reset_underrun(dev_priv);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = i915_fifo_underrun_reset_write,
+ .llseek = default_llseek,
+};
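Writing any value that kstrtobool() parses as true (e.g. "1" or "y") re-arms FIFO underrun reporting on every active pipe; for example (assuming the usual debugfs mount point and DRM minor 0):

	echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset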
+
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4760,8 +4819,6 @@ static const struct i915_debugfs_files {
const struct file_operations *fops;
} i915_debugfs_files[] = {
{"i915_wedged", &i915_wedged_fops},
- {"i915_max_freq", &i915_max_freq_fops},
- {"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
@@ -4770,6 +4827,7 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
+ {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
@@ -4779,10 +4837,12 @@ static const struct i915_debugfs_files {
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_guc_log_control", &i915_guc_log_control_fops},
+ {"i915_guc_log_level", &i915_guc_log_level_fops},
+ {"i915_guc_log_relay", &i915_guc_log_relay_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops},
- {"i915_drrs_ctl", &i915_drrs_ctl_fops}
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops},
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4876,19 +4936,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
return 0;
}
-
-static int i915_dpcd_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_dpcd_show, inode->i_private);
-}
-
-static const struct file_operations i915_dpcd_fops = {
- .owner = THIS_MODULE,
- .open = i915_dpcd_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
static int i915_panel_show(struct seq_file *m, void *data)
{
@@ -4910,19 +4958,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
return 0;
}
-
-static int i915_panel_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_panel_show, inode->i_private);
-}
-
-static const struct file_operations i915_panel_fops = {
- .owner = THIS_MODULE,
- .open = i915_panel_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3b4daafebdcb..9c449b8d8eab 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -101,7 +101,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
__builtin_return_address(0), &vaf);
if (is_error && !shown_bug_once) {
- dev_notice(kdev, "%s", FDO_BUG_MSG);
+ /*
+ * Ask the user to file a bug report for the error, except
+ * if they may have caused the bug by fiddling with unsafe
+ * module parameters.
+ */
+ if (!test_taint(TAINT_USER))
+ dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
@@ -377,9 +383,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- intel_runtime_pm_get(dev_priv);
- value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
- intel_runtime_pm_put(dev_priv);
+ value = intel_huc_check_status(&dev_priv->huc);
+ if (value < 0)
+ return value;
break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
@@ -695,11 +701,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
- intel_uc_init_fw(dev_priv);
-
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_uc;
+ goto cleanup_irq;
intel_setup_overlay(dev_priv);
@@ -719,8 +723,6 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
-cleanup_uc:
- intel_uc_fini_fw(dev_priv);
cleanup_irq:
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
@@ -922,16 +924,21 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
- intel_uc_init_early(dev_priv);
i915_memcpy_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
goto err_engines;
+ ret = i915_gem_init_early(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
+ intel_wopcm_init_early(&dev_priv->wopcm);
+ intel_uc_init_early(dev_priv);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
@@ -940,18 +947,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- ret = i915_gem_load_init(dev_priv);
- if (ret < 0)
- goto err_irq;
-
intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
return 0;
-err_irq:
- intel_irq_fini(dev_priv);
+err_workqueues:
i915_workqueues_cleanup(dev_priv);
err_engines:
i915_engines_cleanup(dev_priv);
@@ -964,8 +966,9 @@ err_engines:
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
- i915_gem_load_cleanup(dev_priv);
intel_irq_fini(dev_priv);
+ intel_uc_cleanup_early(dev_priv);
+ i915_gem_cleanup_early(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_engines_cleanup(dev_priv);
}
@@ -1035,6 +1038,10 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_init(dev_priv);
+ intel_device_info_init_mmio(dev_priv);
+
+ intel_uncore_prune(dev_priv);
+
intel_uc_init_mmio(dev_priv);
ret = intel_engines_init_mmio(dev_priv);
@@ -1077,8 +1084,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915_modparams.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- intel_uc_sanitize_options(dev_priv);
-
intel_gvt_sanitize_options(dev_priv);
}
@@ -1244,7 +1249,6 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
- i915_guc_log_register(dev_priv);
i915_setup_sysfs(dev_priv);
/* Depends on sysfs having been initialized */
@@ -1304,7 +1308,6 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
- i915_guc_log_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
i915_gem_shrinker_unregister(dev_priv);
@@ -1463,7 +1466,6 @@ void i915_driver_unload(struct drm_device *dev)
i915_reset_error_state(dev_priv);
i915_gem_fini(dev_priv);
- intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1876,7 +1878,8 @@ static int i915_resume_switcheroo(struct drm_device *dev)
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
- * @flags: Instructions
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user-visible reason for the reset; or NULL for no dev_notice()
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1891,12 +1894,16 @@ static int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *i915, unsigned int flags)
+void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
int ret;
int i;
+ GEM_TRACE("flags=%lx\n", error->flags);
+
might_sleep();
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -1908,8 +1915,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
if (!i915_gem_unset_wedged(i915))
goto wakeup;
- if (!(flags & I915_RESET_QUIET))
- dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
+ if (reason)
+ dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
disable_irq(i915->drm.irq);
@@ -1952,7 +1959,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
goto error;
}
- i915_gem_reset(i915);
+ i915_gem_reset(i915, stalled_mask);
intel_overlay_reset(i915);
/*
@@ -1998,7 +2005,6 @@ taint:
error:
i915_gem_set_wedged(i915);
i915_retire_requests(i915);
- intel_gpu_reset(i915, ALL_ENGINES);
goto finish;
}
@@ -2011,7 +2017,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
- * @flags: options
+ * @msg: reason for GPU reset; or NULL for no dev_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
@@ -2021,12 +2027,13 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
-int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
+int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
struct i915_request *active_request;
int ret;
+ GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
active_request = i915_gem_reset_prepare_engine(engine);
@@ -2036,10 +2043,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
goto out;
}
- if (!(flags & I915_RESET_QUIET)) {
+ if (msg)
dev_notice(engine->i915->drm.dev,
- "Resetting %s after gpu hang\n", engine->name);
- }
+ "Resetting %s for %s\n", engine->name, msg);
error->reset_engine_count[engine->id]++;
if (!engine->i915->guc.execbuf_client)
@@ -2059,7 +2065,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
- i915_gem_reset_engine(engine, active_request);
+ i915_gem_reset_engine(engine, active_request, true);
/*
* The engine and its registers (and workarounds in case of render)
@@ -2468,10 +2474,13 @@ static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
/*
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
+ *
+ * This can fail to turn off rc6 if the GPU is stuck after a failed
+ * reset and we are trying to force the machine to sleep.
*/
if (vlv_wait_for_pw_status(dev_priv, mask, val))
- DRM_ERROR("timeout waiting for GT wells to go %s\n",
- onoff(wait_for_on));
+ DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
+ onoff(wait_for_on));
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -2822,10 +2831,10 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce18b6cf6e68..7014a96546f4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -64,6 +64,7 @@
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
+#include "intel_wopcm.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -71,9 +72,10 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_timeline.h"
-
+#include "i915_gpu_error.h"
#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_timeline.h"
#include "i915_vma.h"
#include "intel_gvt.h"
@@ -83,8 +85,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180308"
-#define DRIVER_TIMESTAMP 1520513379
+#define DRIVER_DATE "20180514"
+#define DRIVER_TIMESTAMP 1526300884
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -261,6 +263,7 @@ enum hpd_pin {
HPD_PORT_C,
HPD_PORT_D,
HPD_PORT_E,
+ HPD_PORT_F,
HPD_NUM_PINS
};
@@ -337,14 +340,21 @@ struct drm_i915_file_private {
unsigned int bsd_engine;
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments the per-client ban score. Hangs in
+ * short succession also increment the ban score. If the ban threshold
+ * is reached, the client is considered banned and submitting more work
+ * will fail. This is a stopgap measure to limit a badly behaving
+ * client's access to the GPU. Note that unbannable contexts never increment
+ * the client ban score.
*/
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
- atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
};
/* Interface history:
@@ -453,172 +463,6 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-struct intel_display_error_state;
-
-struct i915_gpu_state {
- struct kref ref;
- ktime_t time;
- ktime_t boottime;
- ktime_t uptime;
-
- struct drm_i915_private *i915;
-
- char error_msg[128];
- bool simulated;
- bool awake;
- bool wakelock;
- bool suspended;
- int iommu;
- u32 reset_count;
- u32 suspend_count;
- struct intel_device_info device_info;
- struct intel_driver_caps driver_caps;
- struct i915_params params;
-
- struct i915_error_uc {
- struct intel_uc_fw guc_fw;
- struct intel_uc_fw huc_fw;
- struct drm_i915_error_object *guc_log;
- } uc;
-
- /* Generic register state */
- u32 eir;
- u32 pgtbl_er;
- u32 ier;
- u32 gtier[4], ngtier;
- u32 ccid;
- u32 derrmr;
- u32 forcewake;
- u32 error; /* gen6+ */
- u32 err_int; /* gen7 */
- u32 fault_data0; /* gen8, gen9 */
- u32 fault_data1; /* gen8, gen9 */
- u32 done_reg;
- u32 gac_eco;
- u32 gam_ecochk;
- u32 gab_ctl;
- u32 gfx_mode;
-
- u32 nfence;
- u64 fence[I915_MAX_NUM_FENCES];
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
-
- struct drm_i915_error_engine {
- int engine_id;
- /* Software tracked state */
- bool idle;
- bool waiting;
- int num_waiters;
- unsigned long hangcheck_timestamp;
- bool hangcheck_stalled;
- enum intel_engine_hangcheck_action hangcheck_action;
- struct i915_address_space *vm;
- int num_requests;
- u32 reset_count;
-
- /* position of active request inside the ring */
- u32 rq_head, rq_post, rq_tail;
-
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head;
- u32 cpu_ring_tail;
-
- u32 last_seqno;
-
- /* Register state */
- u32 start;
- u32 tail;
- u32 head;
- u32 ctl;
- u32 mode;
- u32 hws;
- u32 ipeir;
- u32 ipehr;
- u32 bbstate;
- u32 instpm;
- u32 instps;
- u32 seqno;
- u64 bbaddr;
- u64 acthd;
- u32 fault_reg;
- u64 faddr;
- u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
- struct intel_instdone instdone;
-
- struct drm_i915_error_context {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- u32 handle;
- u32 hw_id;
- int priority;
- int ban_score;
- int active;
- int guilty;
- bool bannable;
- } context;
-
- struct drm_i915_error_object {
- u64 gtt_offset;
- u64 gtt_size;
- int page_count;
- int unused;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
- struct drm_i915_error_object **user_bo;
- long user_bo_count;
-
- struct drm_i915_error_object *wa_ctx;
- struct drm_i915_error_object *default_state;
-
- struct drm_i915_error_request {
- long jiffies;
- pid_t pid;
- u32 context;
- int priority;
- int ban_score;
- u32 seqno;
- u32 head;
- u32 tail;
- } *requests, execlist[EXECLIST_MAX_PORTS];
- unsigned int num_ports;
-
- struct drm_i915_error_waiter {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- u32 seqno;
- } *waiters;
-
- struct {
- u32 gfx_mode;
- union {
- u64 pdp[4];
- u32 pp_dir_base;
- };
- } vm_info;
- } engine[I915_NUM_ENGINES];
-
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
- u64 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- u32 userptr:1;
- s32 engine:4;
- u32 cache_level:3;
- } *active_bo[I915_NUM_ENGINES], *pinned_bo;
- u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
- struct i915_address_space *active_vm[I915_NUM_ENGINES];
-};
-
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -766,12 +610,16 @@ struct i915_psr {
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
- bool psr2_support;
- bool aux_frame_sync;
+ bool sink_psr2_support;
bool link_standby;
- bool y_cord_support;
bool colorimetry_support;
bool alpm;
+ bool has_hw_tracking;
+ bool psr2_enabled;
+ u8 sink_sync_latency;
+ bool debug;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
void (*enable_source)(struct intel_dp *,
const struct intel_crtc_state *);
@@ -1146,16 +994,6 @@ struct i915_gem_mm {
u32 object_count;
};
-struct drm_i915_error_state_buf {
- struct drm_i915_private *i915;
- unsigned bytes;
- unsigned size;
- int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
-};
-
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
@@ -1164,102 +1002,6 @@ struct drm_i915_error_state_buf {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
-struct i915_gpu_error {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work hangcheck_work;
-
- /* For reset and error_state handling. */
- spinlock_t lock;
- /* Protected by the above dev->gpu_error.lock. */
- struct i915_gpu_state *first_error;
-
- atomic_t pending_fb_pin;
-
- unsigned long missed_irq_rings;
-
- /**
- * State variable controlling the reset flow and count
- *
- * This is a counter which gets incremented when reset is triggered,
- *
- * Before the reset commences, the I915_RESET_BACKOFF bit is set
- * meaning that any waiters holding onto the struct_mutex should
- * relinquish the lock immediately in order for the reset to start.
- *
- * If reset is not completed succesfully, the I915_WEDGE bit is
- * set meaning that hardware is terminally sour and there is no
- * recovery. All waiters on the reset_queue will be woken when
- * that happens.
- *
- * This counter is used by the wait_seqno code to notice that reset
- * event happened and it needs to restart the entire ioctl (since most
- * likely the seqno it waited for won't ever signal anytime soon).
- *
- * This is important for lock-free wait paths, where no contended lock
- * naturally enforces the correct ordering between the bail-out of the
- * waiter and the gpu reset work code.
- */
- unsigned long reset_count;
-
- /**
- * flags: Control various stages of the GPU reset
- *
- * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
- * other users acquiring the struct_mutex. To do this we set the
- * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
- * and then check for that bit before acquiring the struct_mutex (in
- * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
- * secondary role in preventing two concurrent global reset attempts.
- *
- * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
- * struct_mutex. We try to acquire the struct_mutex in the reset worker,
- * but it may be held by some long running waiter (that we cannot
- * interrupt without causing trouble). Once we are ready to do the GPU
- * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
- * they already hold the struct_mutex and want to participate they can
- * inspect the bit and do the reset directly, otherwise the worker
- * waits for the struct_mutex.
- *
- * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
- * flag to prevent two concurrent reset attempts in the same engine.
- * As the number of engines continues to grow, allocate the flags from
- * the most significant bits.
- *
- * #I915_WEDGED - If reset fails and we can no longer use the GPU,
- * we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_request_alloc(), this bit is checked and the sequence
- * aborted (with -EIO reported to userspace) if set.
- */
- unsigned long flags;
-#define I915_RESET_BACKOFF 0
-#define I915_RESET_HANDOFF 1
-#define I915_RESET_MODESET 2
-#define I915_WEDGED (BITS_PER_LONG - 1)
-#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
-
- /** Number of times an engine has been reset */
- u32 reset_engine_count[I915_NUM_ENGINES];
-
- /**
- * Waitqueue to signal when a hang is detected. Used to for waiters
- * to release the struct_mutex for the reset to procede.
- */
- wait_queue_head_t wait_queue;
-
- /**
- * Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
- */
- wait_queue_head_t reset_queue;
-
- /* For missed irq/seqno simulation. */
- unsigned long test_irq_rings;
-};
-
enum modeset_restore {
MODESET_ON_LID_OPEN,
MODESET_DONE,
@@ -1338,6 +1080,7 @@ struct intel_vbt_data {
} edp;
struct {
+ bool enable;
bool full_link;
bool require_aux_wakeup;
int idle_frames;
@@ -1451,11 +1194,13 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
- struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ /* packed/y */
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ u8 enabled_slices; /* GEN11 has 2 configurable slices */
};
-struct skl_wm_values {
+struct skl_ddb_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
};
@@ -1470,6 +1215,7 @@ struct skl_wm_level {
struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
+ bool is_planar;
uint32_t width;
uint8_t cpp;
uint32_t plane_pixel_rate;
@@ -1564,7 +1310,6 @@ struct i915_wa_reg {
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
- u32 hw_whitelist_count[I915_NUM_ENGINES];
};
struct i915_virtual_gpu {
@@ -1860,6 +1605,8 @@ struct drm_i915_private {
struct intel_gvt *gvt;
+ struct intel_wopcm wopcm;
+
struct intel_huc huc;
struct intel_guc guc;
@@ -2152,7 +1899,7 @@ struct drm_i915_private {
/* current hardware state */
union {
struct ilk_wm_values hw;
- struct skl_wm_values skl_hw;
+ struct skl_ddb_values skl_hw;
struct vlv_wm_values vlv;
struct g4x_wm_values g4x;
};
@@ -2321,8 +2068,11 @@ struct drm_i915_private {
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct list_head timelines;
- struct i915_gem_timeline global_timeline;
+
+ struct list_head active_rings;
+ struct list_head closed_vma;
u32 active_requests;
+ u32 request_serial;
/**
* Is the GPU currently considered idle, or busy executing
@@ -2392,6 +2142,11 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
return to_i915(dev_get_drvdata(kdev));
}
+static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+{
+ return container_of(wopcm, struct drm_i915_private, wopcm);
+}
+
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
return container_of(guc, struct drm_i915_private, guc);
@@ -2411,8 +2166,10 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+ (tmp__) ? \
+ ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ 0;)
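A minimal, hypothetical call site for the reworked iterator, which visits only the engines selected by the mask:

	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, stalled_mask, tmp)
		DRM_DEBUG_DRIVER("%s selected\n", engine->name);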
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2720,6 +2477,15 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_CNL_REVID(p, since, until) \
(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
+#define ICL_REVID_A0 0x0
+#define ICL_REVID_A2 0x1
+#define ICL_REVID_B0 0x3
+#define ICL_REVID_B2 0x4
+#define ICL_REVID_C0 0x5
+
+#define IS_ICL_REVID(p, since, until) \
+ (IS_ICELAKE(p) && IS_REVID(p, since, until))
+
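These follow the driver's existing *_REVID convention; a workaround restricted to early Icelake steppings would be guarded like this (hypothetical example, no such workaround appears in this hunk):

	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
		; /* apply the stepping-specific workaround here */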
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2963,10 +2729,11 @@ extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-#define I915_RESET_QUIET BIT(0)
-extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
+extern void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason);
extern int i915_reset_engine(struct intel_engine_cs *engine,
- unsigned int flags);
+ const char *reason);
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
@@ -3014,10 +2781,12 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
-__printf(3, 4)
+__printf(4, 5)
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
+ unsigned long flags,
const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
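Callers now opt in to error capture explicitly. The debugfs wedging path earlier in this patch, for instance, requests a capture:

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

A caller that wants only the reset and dev_notice() would presumably pass 0 for the flags.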
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
@@ -3132,8 +2901,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_load_init(struct drm_i915_private *dev_priv);
-void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
+int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
@@ -3388,13 +3157,15 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv,
+ unsigned int stalled_mask);
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request);
+ struct i915_request *request,
+ bool stalled);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3412,7 +3183,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
struct intel_rps_client *rps);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
- int priority);
+ const struct i915_sched_attr *attr);
#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
int __must_check
@@ -3481,16 +3252,6 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
return ctx;
}
-static inline struct intel_timeline *
-i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct i915_address_space *vm;
-
- vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
- return &vm->timeline.engine[engine->id];
-}
-
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
@@ -3589,64 +3350,6 @@ static inline int i915_debugfs_connector_add(struct drm_connector *connector)
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
-/* i915_gpu_error.c */
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-__printf(2, 3)
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-static inline void i915_error_state_buf_release(
- struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
-
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *error_msg);
-
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
-{
- kref_get(&gpu->ref);
- return gpu;
-}
-
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
-{
- if (gpu)
- kref_put(&gpu->ref, __i915_gpu_state_free);
-}
-
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
-void i915_reset_error_state(struct drm_i915_private *i915);
-
-#else
-
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *error_msg)
-{
-}
-
-static inline struct i915_gpu_state *
-i915_first_error_state(struct drm_i915_private *i915)
-{
- return NULL;
-}
-
-static inline void i915_reset_error_state(struct drm_i915_private *i915)
-{
-}
-
-#endif
-
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7b5a9d7c9593..d44ad7bc1e94 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "intel_workarounds.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
@@ -136,6 +137,102 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
return 0;
}
+static u32 __i915_gem_park(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(i915->gt.active_requests);
+ GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
+
+ if (!i915->gt.awake)
+ return I915_EPOCH_INVALID;
+
+ GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+
+ /*
+ * Be paranoid and flush a concurrent interrupt to make sure
+ * we don't reactivate any irq tasklets after parking.
+ *
+ * FIXME: Note that even though we have waited for execlists to be idle,
+ * there may still be an in-flight interrupt even though the CSB
+ * is now empty. synchronize_irq() makes sure that a residual interrupt
+ * is completed before we continue, but it doesn't prevent the HW from
+ * raising a spurious interrupt later. To complete the shield we should
+ * coordinate disabling the CS irq with flushing the interrupts.
+ */
+ synchronize_irq(i915->drm.irq);
+
+ intel_engines_park(i915);
+ i915_timelines_park(i915);
+
+ i915_pmu_gt_parked(i915);
+ i915_vma_parked(i915);
+
+ i915->gt.awake = false;
+
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_idle(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
+
+ intel_runtime_pm_put(i915);
+
+ return i915->gt.epoch;
+}
+
+void i915_gem_park(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ if (!i915->gt.awake)
+ return;
+
+ /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
+ mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
+}
+
+void i915_gem_unpark(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->gt.active_requests);
+
+ if (i915->gt.awake)
+ return;
+
+ intel_runtime_pm_get_noresume(i915);
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has negative impact on the performance of the chip with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
+ i915->gt.awake = true;
+ if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
+ i915->gt.epoch = 1;
+
+ intel_enable_gt_powersave(i915);
+ i915_update_gfx_val(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_busy(i915);
+ i915_pmu_gt_unparked(i915);
+
+ intel_engines_unpark(i915);
+
+ i915_queue_hangcheck(i915);
+
+ queue_delayed_work(i915->wq,
+ &i915->gt.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
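The epoch counter gives callers a cheap way to notice that the GT was re-awoken between two samples: it is bumped on every unpark and never takes the value 0 (I915_EPOCH_INVALID). A minimal sketch of that pattern (hypothetical caller, not from this patch):

	u32 epoch = READ_ONCE(i915->gt.epoch);
	/* ...do some work... */
	if (READ_ONCE(i915->gt.epoch) != epoch)
		; /* an unpark (and possibly a park) happened in between */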
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -469,7 +566,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
return timeout;
}
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
@@ -480,13 +578,16 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
rq = to_request(fence);
engine = rq->engine;
- rcu_read_lock();
+ local_bh_disable();
+ rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(rq, prio);
+ engine->schedule(rq, attr);
rcu_read_unlock();
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
@@ -494,16 +595,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
int i;
for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], prio);
+ __fence_set_priority(array->fences[i], attr);
} else {
- __fence_set_priority(fence, prio);
+ __fence_set_priority(fence, attr);
}
}
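Priority bumps are now expressed through a scheduling attribute rather than a bare integer. A minimal sketch of a caller, assuming struct i915_sched_attr from the newly added i915_scheduler.h carries the priority as the only field used here:

	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);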
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
- int prio)
+ const struct i915_sched_attr *attr)
{
struct dma_fence *excl;
@@ -518,7 +619,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
return ret;
for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], prio);
+ fence_set_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
@@ -528,7 +629,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
if (excl) {
- fence_set_priority(excl, prio);
+ fence_set_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
@@ -2832,32 +2933,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+ const struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ unsigned long prev_hang;
+
+ if (i915_gem_context_is_banned(ctx))
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+ else
+ score = 0;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
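Worked through with the constants defined in i915_drv.h above: a ban of a bannable context adds I915_CLIENT_SCORE_CONTEXT_BAN (3), and a hang within I915_CLIENT_FAST_HANG_JIFFIES (60 s) of the previous one adds I915_CLIENT_SCORE_HANG_FAST (1), so three context bans in quick succession already reach I915_CLIENT_SCORE_BANNED (9) and the client is considered banned.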
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
- bool banned;
+ unsigned int score;
+ bool banned, bannable;
atomic_inc(&ctx->guilty_count);
- banned = false;
- if (i915_gem_context_is_bannable(ctx)) {
- unsigned int score;
+ bannable = i915_gem_context_is_bannable(ctx);
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- score = atomic_add_return(CONTEXT_SCORE_GUILTY,
- &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score, yesno(banned && bannable));
- DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
- ctx->name, score, yesno(banned));
- }
- if (!banned)
+ /* Cool (unbannable) contexts don't accumulate client ban score */
+ if (!bannable)
return;
- i915_gem_context_set_banned(ctx);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- atomic_inc(&ctx->file_priv->context_bans);
- DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
- ctx->name, atomic_read(&ctx->file_priv->context_bans));
- }
+ if (banned)
+ i915_gem_context_set_banned(ctx);
+
+ if (!IS_ERR_OR_NULL(ctx->file_priv))
+ i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -2871,45 +2994,30 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *request, *active = NULL;
unsigned long flags;
- /* We are called by the error capture and reset at a random
- * point in time. In particular, note that neither is crucially
- * ordered with an interrupt. After a hang, the GPU is dead and we
- * assume that no more writes can happen (we waited long enough for
- * all writes that were in transaction to be flushed) - adding an
+ /*
+ * We are called by error capture, by reset, and to dump engine
+ * state at random points in time. In particular, note that none of
+ * these is crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
+ * At all other times, we must assume the GPU is still running, but
+ * we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->timeline->lock, flags);
- list_for_each_entry(request, &engine->timeline->requests, link) {
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ list_for_each_entry(request, &engine->timeline.requests, link) {
if (__i915_request_completed(request, request->global_seqno))
continue;
- GEM_BUG_ON(request->engine != engine);
- GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &request->fence.flags));
-
active = request;
break;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
return active;
}
-static bool engine_stalled(struct intel_engine_cs *engine)
-{
- if (!engine->hangcheck.stalled)
- return false;
-
- /* Check for possible seqno movement after hang declaration */
- if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
- DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
- return false;
- }
-
- return true;
-}
-
/*
* Ensure the irq handler finishes, and is not run again.
* Also return the active request so that we only search for it once.
@@ -2998,6 +3106,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
}
i915_gem_revoke_fences(dev_priv);
+ intel_uc_sanitize(dev_priv);
return err;
}
@@ -3025,15 +3134,15 @@ static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->ctx;
- struct intel_timeline *timeline;
+ struct i915_timeline *timeline = request->timeline;
unsigned long flags;
- timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
+ GEM_BUG_ON(timeline == &engine->timeline);
- spin_lock_irqsave(&engine->timeline->lock, flags);
- spin_lock(&timeline->lock);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
- list_for_each_entry_continue(request, &engine->timeline->requests, link)
+ list_for_each_entry_continue(request, &engine->timeline.requests, link)
if (request->ctx == hung_ctx)
skip_request(request);
@@ -3041,13 +3150,14 @@ static void engine_skip_context(struct i915_request *request)
skip_request(request);
spin_unlock(&timeline->lock);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
/* Returns the request if it was guilty of the hang */
static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
- struct i915_request *request)
+ struct i915_request *request,
+ bool stalled)
{
/* The guilty request will get skipped on a hung engine.
*
@@ -3070,7 +3180,15 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
* subsequent hangs.
*/
- if (engine_stalled(engine)) {
+ if (i915_request_completed(request)) {
+ GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
+ engine->name, request->global_seqno,
+ request->fence.context, request->fence.seqno,
+ intel_engine_get_seqno(engine));
+ stalled = false;
+ }
+
+ if (stalled) {
i915_gem_context_mark_guilty(request->ctx);
skip_request(request);
@@ -3089,11 +3207,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
- spin_lock_irq(&engine->timeline->lock);
+ spin_lock_irq(&engine->timeline.lock);
request = list_prev_entry(request, link);
- if (&request->link == &engine->timeline->requests)
+ if (&request->link == &engine->timeline.requests)
request = NULL;
- spin_unlock_irq(&engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline.lock);
}
}
@@ -3101,7 +3219,8 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request)
+ struct i915_request *request,
+ bool stalled)
{
/*
* Make sure this write is visible before we re-enable the interrupt
@@ -3111,7 +3230,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
smp_store_mb(engine->irq_posted, 0);
if (request)
- request = i915_gem_reset_request(engine, request);
+ request = i915_gem_reset_request(engine, request, stalled);
if (request) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -3122,7 +3241,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
engine->reset_hw(engine, request);
}
-void i915_gem_reset(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv,
+ unsigned int stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -3134,10 +3254,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
- i915_gem_reset_engine(engine, engine->hangcheck.active_request);
+ i915_gem_reset_engine(engine,
+ engine->hangcheck.active_request,
+ stalled_mask & ENGINE_MASK(id));
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
- engine->context_unpin(engine, ctx);
+ intel_context_unpin(ctx, engine);
/*
* Ostensibly, we always want a context loaded for powersaving,
@@ -3160,13 +3282,6 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
}
i915_gem_restore_fences(dev_priv);
-
- if (dev_priv->gt.awake) {
- intel_sanitize_gt_powersave(dev_priv);
- intel_enable_gt_powersave(dev_priv);
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_rps_busy(dev_priv);
- }
}
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
@@ -3192,6 +3307,9 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
+ GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+ request->engine->name,
+ request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
i915_request_submit(request);
@@ -3201,12 +3319,15 @@ static void nop_complete_submit_request(struct i915_request *request)
{
unsigned long flags;
+ GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+ request->engine->name,
+ request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
- spin_lock_irqsave(&request->engine->timeline->lock, flags);
+ spin_lock_irqsave(&request->engine->timeline.lock, flags);
__i915_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
- spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}
void i915_gem_set_wedged(struct drm_i915_private *i915)
@@ -3214,7 +3335,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- if (drm_debug & DRM_UT_DRIVER) {
+ GEM_TRACE("start\n");
+
+ if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer(__func__);
for_each_engine(engine, i915, id)
@@ -3237,6 +3360,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
}
i915->caps.scheduler = 0;
+ /* Even if the GPU reset fails, it should still stop the engines */
+ intel_gpu_reset(i915, ALL_ENGINES);
+
/*
* Make sure no one is running the old callback before we proceed with
* cancelling requests and resetting the completion tracking. Otherwise
@@ -3270,27 +3396,31 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
intel_engine_init_global_seqno(engine,
intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
i915_gem_reset_finish_engine(engine);
}
+ GEM_TRACE("end\n");
+
wake_up_all(&i915->gpu_error.reset_queue);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
- struct i915_gem_timeline *tl;
- int i;
+ struct i915_timeline *tl;
lockdep_assert_held(&i915->drm.struct_mutex);
if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
return true;
- /* Before unwedging, make sure that all pending operations
+ GEM_TRACE("start\n");
+
+ /*
+ * Before unwedging, make sure that all pending operations
* are flushed and errored out - we may have requests waiting upon
* third party fences. We marked all inflight requests as EIO, and
* every execbuf since returned EIO, for consistency we want all
@@ -3300,31 +3430,33 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
* No more can be submitted until we reset the wedged bit.
*/
list_for_each_entry(tl, &i915->gt.timelines, link) {
- for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
- struct i915_request *rq;
+ struct i915_request *rq;
- rq = i915_gem_active_peek(&tl->engine[i].last_request,
- &i915->drm.struct_mutex);
- if (!rq)
- continue;
+ rq = i915_gem_active_peek(&tl->last_request,
+ &i915->drm.struct_mutex);
+ if (!rq)
+ continue;
- /* We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
- */
- if (dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT) < 0)
- return false;
- }
+ /*
+ * We can't use our normal waiter as we want to
+ * avoid recursively trying to handle the current
+ * reset. The basic dma_fence_default_wait() installs
+ * a callback for dma_fence_signal(), which is
+ * triggered by our nop handler (indirectly, the
+ * callback enables the signaler thread which is
+ * woken by the nop_submit_request() advancing the seqno
+ * and when the seqno passes the fence, the signaler
+ * then signals the fence waking us up).
+ */
+ if (dma_fence_default_wait(&rq->fence, true,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return false;
}
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
- /* Undo nop_submit_request. We prevent all new i915 requests from
+ /*
+ * Undo nop_submit_request. We prevent all new i915 requests from
* being queued (by disallowing execbuf whilst wedged) so having
* waited for all active requests above, we know the system is idle
* and do not have to worry about a thread being inside
@@ -3335,6 +3467,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
intel_engines_reset_default_submission(i915);
i915_gem_contexts_lost(i915);
+ GEM_TRACE("end\n");
+
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
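The comment above is the key subtlety of unwedging: the wait must bypass the driver's own fence->ops->wait so the in-progress reset cannot recurse into itself. A minimal sketch of that pattern, assuming only <linux/dma-fence.h>:

/* Wait on a fence via the default path, never via fence->ops->wait. */
static long wait_without_ops(struct dma_fence *fence)
{
	return dma_fence_default_wait(fence, true, MAX_SCHEDULE_TIMEOUT);
}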
@@ -3473,36 +3607,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (new_requests_since_last_retire(dev_priv))
goto out_unlock;
- /*
- * Be paranoid and flush a concurrent interrupt to make sure
- * we don't reactivate any irq tasklets after parking.
- *
- * FIXME: Note that even though we have waited for execlists to be idle,
- * there may still be an in-flight interrupt even though the CSB
- * is now empty. synchronize_irq() makes sure that a residual interrupt
- * is completed before we continue, but it doesn't prevent the HW from
- * raising a spurious interrupt later. To complete the shield we should
- * coordinate disabling the CS irq with flushing the interrupts.
- */
- synchronize_irq(dev_priv->drm.irq);
-
- intel_engines_park(dev_priv);
- i915_gem_timelines_park(dev_priv);
+ epoch = __i915_gem_park(dev_priv);
- i915_pmu_gt_parked(dev_priv);
-
- GEM_BUG_ON(!dev_priv->gt.awake);
- dev_priv->gt.awake = false;
- epoch = dev_priv->gt.epoch;
- GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
rearm_hangcheck = false;
-
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_rps_idle(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
- intel_runtime_pm_put(dev_priv);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3648,17 +3755,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
+static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
{
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
- ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
- if (ret)
- return ret;
- }
-
- return 0;
+ return i915_gem_active_wait(&tl->last_request, flags);
}
static int wait_for_engines(struct drm_i915_private *i915)
@@ -3666,16 +3765,7 @@ static int wait_for_engines(struct drm_i915_private *i915)
if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
dev_err(i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
- if (drm_debug & DRM_UT_DRIVER) {
- struct drm_printer p = drm_debug_printer(__func__);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- }
-
+ GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
return -EIO;
}
@@ -3685,30 +3775,37 @@ static int wait_for_engines(struct drm_i915_private *i915)
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- int ret;
-
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
if (flags & I915_WAIT_LOCKED) {
- struct i915_gem_timeline *tl;
+ struct i915_timeline *tl;
+ int err;
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
- ret = wait_for_timeline(tl, flags);
- if (ret)
- return ret;
+ err = wait_for_timeline(tl, flags);
+ if (err)
+ return err;
}
i915_retire_requests(i915);
- ret = wait_for_engines(i915);
+ return wait_for_engines(i915);
} else {
- ret = wait_for_timeline(&i915->gt.global_timeline, flags);
- }
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- return ret;
+ for_each_engine(engine, i915, id) {
+ err = wait_for_timeline(&engine->timeline, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
}
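A hedged usage sketch of the reworked i915_gem_wait_for_idle(): holders of struct_mutex pass I915_WAIT_LOCKED and get the timeline walk plus request retirement, while unlocked callers now wait on each engine's embedded timeline:

int err;

mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_wait_for_idle(i915,
			     I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);

/* Or, without the lock, which now iterates engine->timeline: */
err = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE);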
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4088,9 +4185,10 @@ out:
}
/*
- * Prepare buffer for display plane (scanout, cursors, etc).
- * Can be called from an uninterruptible phase (modesetting) and allows
- * any flushes to be pipelined (for pageflips).
+ * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
+ * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
+ * (for pageflips). We only flush the caches while preparing the buffer for
+ * display, the callers are responsible for frontbuffer flush.
*/
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -4146,9 +4244,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
__i915_gem_object_flush_for_display(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
@@ -4723,7 +4819,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
&obj->vma_list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_close(vma);
+ i915_vma_destroy(vma);
}
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
@@ -4878,7 +4974,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
GEM_BUG_ON(engine->last_retired_context != kernel_context);
}
}
@@ -4973,6 +5069,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
+ intel_uc_sanitize(dev_priv);
i915_gem_sanitize(dev_priv);
intel_runtime_pm_put(dev_priv);
@@ -5118,6 +5215,8 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
}
+ intel_gt_workarounds_apply(dev_priv);
+
i915_gem_init_swizzling(dev_priv);
/*
@@ -5140,6 +5239,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
goto out;
}
+ ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+ if (ret) {
+ DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
+ goto out;
+ }
+
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
if (ret) {
@@ -5207,7 +5312,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
struct i915_vma *state;
- state = ctx->engine[id].state;
+ state = to_intel_context(ctx, engine)->state;
if (!state)
continue;
@@ -5297,6 +5402,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ ret = intel_wopcm_init(&dev_priv->wopcm);
+ if (ret)
+ return ret;
+
ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
@@ -5478,8 +5587,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
-int
-i915_gem_load_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err = -ENOMEM;
@@ -5512,12 +5620,9 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (!dev_priv->priorities)
goto err_dependencies;
- mutex_lock(&dev_priv->drm.struct_mutex);
INIT_LIST_HEAD(&dev_priv->gt.timelines);
- err = i915_gem_timeline_init__global(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (err)
- goto err_priorities;
+ INIT_LIST_HEAD(&dev_priv->gt.active_rings);
+ INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
i915_gem_init__mm(dev_priv);
@@ -5538,8 +5643,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
return 0;
-err_priorities:
- kmem_cache_destroy(dev_priv->priorities);
err_dependencies:
kmem_cache_destroy(dev_priv->dependencies);
err_requests:
@@ -5554,17 +5657,13 @@ err_out:
return err;
}
-void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
WARN_ON(!list_empty(&dev_priv->gt.timelines));
- mutex_unlock(&dev_priv->drm.struct_mutex);
kmem_cache_destroy(dev_priv->priorities);
kmem_cache_destroy(dev_priv->dependencies);
@@ -5659,6 +5758,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
+ file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index f54c4ff74ded..525920404ede 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -27,7 +27,12 @@
#include <linux/bug.h>
+struct drm_i915_private;
+
#ifdef CONFIG_DRM_I915_DEBUG_GEM
+
+#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
+
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
pr_err("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
@@ -43,6 +48,9 @@
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#else
+
+#define GEM_SHOW_DEBUG() (0)
+
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
@@ -53,10 +61,15 @@
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
#else
#define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_DUMP() do { } while (0)
#endif
#define I915_NUM_ENGINES 8
+void i915_gem_park(struct drm_i915_private *i915);
+void i915_gem_unpark(struct drm_i915_private *i915);
+
#endif /* __I915_GEM_H__ */
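A hedged usage sketch of the tracing macros defined above (effective only with CONFIG_DRM_I915_TRACE_GEM=y; 'broken' is a stand-in condition):

GEM_TRACE("%s fence %llx:%d\n",
	  engine->name, rq->fence.context, rq->fence.seqno);
if (unlikely(broken))
	GEM_TRACE_DUMP();	/* dump the whole ftrace buffer */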
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index d3cbe8432f48..f3890b664e3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -1,29 +1,11 @@
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2014-2018 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_gem_batch_pool.h"
+#include "i915_drv.h"
/**
* DOC: batch pool
@@ -41,11 +23,11 @@
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @engine: the associated request submission engine
* @pool: the batch buffer pool
+ * @engine: the associated request submission engine
*/
-void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
- struct i915_gem_batch_pool *pool)
+void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
+ struct intel_engine_cs *engine)
{
int n;
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 10d5ac4c00d3..56947daaaf65 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -1,31 +1,13 @@
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2014-2018 Intel Corporation
*/
#ifndef I915_GEM_BATCH_POOL_H
#define I915_GEM_BATCH_POOL_H
-#include "i915_drv.h"
+#include <linux/types.h>
struct intel_engine_cs;
@@ -34,9 +16,8 @@ struct i915_gem_batch_pool {
struct list_head cache_list[4];
};
-/* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
- struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
+ struct intel_engine_cs *engine);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object*
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
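A hedged usage sketch of the reordered batch-pool API, with the object being initialised now first, per the usual kernel convention:

struct i915_gem_batch_pool pool;
struct drm_i915_gem_object *obj;

i915_gem_batch_pool_init(&pool, engine);	/* pool first, engine second */
obj = i915_gem_batch_pool_get(&pool, 4096);
if (!IS_ERR(obj)) {
	/* ... fill and submit the batch ... */
}
i915_gem_batch_pool_fini(&pool);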
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f2cbea7cf940..060335d3d9e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_workarounds.h"
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
@@ -116,15 +117,15 @@ static void lut_close(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- int i;
+ unsigned int n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
i915_ppgtt_put(ctx->ppgtt);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_context *ce = &ctx->engine[i];
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
if (!ce->state)
continue;
@@ -280,7 +281,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
- ctx->priority = I915_PRIORITY_NORMAL;
+ ctx->sched.priority = I915_PRIORITY_NORMAL;
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -318,12 +319,13 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
- /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
+ /*
+ * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
if (USES_GUC(dev_priv))
- ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
+ ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -429,7 +431,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
i915_gem_context_clear_bannable(ctx);
- ctx->priority = prio;
+ ctx->sched.priority = prio;
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -458,11 +460,16 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
+ int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
+ ret = intel_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
init_llist_head(&dev_priv->contexts.free_list);
@@ -514,7 +521,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
if (!engine->last_retired_context)
continue;
- engine->context_unpin(engine, engine->last_retired_context);
+ intel_context_unpin(engine->last_retired_context, engine);
engine->last_retired_context = NULL;
}
}
@@ -570,19 +577,29 @@ void i915_gem_context_close(struct drm_file *file)
idr_destroy(&file_priv->context_idr);
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static struct i915_request *
+last_request_on_engine(struct i915_timeline *timeline,
+ struct intel_engine_cs *engine)
{
- struct i915_gem_timeline *timeline;
+ struct i915_request *rq;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- struct intel_timeline *tl;
+ if (timeline == &engine->timeline)
+ return NULL;
- if (timeline == &engine->i915->gt.global_timeline)
- continue;
+ rq = i915_gem_active_raw(&timeline->last_request,
+ &engine->i915->drm.struct_mutex);
+ if (rq && rq->engine == engine)
+ return rq;
+
+ return NULL;
+}
- tl = &timeline->engine[engine->id];
- if (i915_gem_active_peek(&tl->last_request,
- &engine->i915->drm.struct_mutex))
+static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+{
+ struct i915_timeline *timeline;
+
+ list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
+ if (last_request_on_engine(timeline, engine))
return false;
}
@@ -592,7 +609,7 @@ static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- struct i915_gem_timeline *timeline;
+ struct i915_timeline *timeline;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -612,11 +629,8 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
struct i915_request *prev;
- struct intel_timeline *tl;
- tl = &timeline->engine[engine->id];
- prev = i915_gem_active_raw(&tl->last_request,
- &dev_priv->drm.struct_mutex);
+ prev = last_request_on_engine(timeline, engine);
if (prev)
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
@@ -638,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
- return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -746,7 +760,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
- args->value = ctx->priority;
+ args->value = ctx->sched.priority;
break;
default:
ret = -EINVAL;
@@ -819,7 +833,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
- ctx->priority = priority;
+ ctx->sched.priority = priority;
}
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..ace3b129c189 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@ struct i915_gem_context {
*/
u32 user_handle;
- /**
- * @priority: execution and service priority
- *
- * All clients are equal, but some are more equal than others!
- *
- * Requests from a context with a greater (more positive) value of
- * @priority will be executed before those with a lower @priority
- * value, forming a simple QoS.
- *
- * The &drm_i915_private.kernel_context is assigned the lowest priority.
- */
- int priority;
+ struct i915_sched_attr sched;
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;
@@ -160,7 +149,7 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
- } engine[I915_NUM_ENGINES];
+ } __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
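With priority folded into an embedded struct i915_sched_attr, future scheduling parameters can travel with it. The accessor change, in brief:

/* Before: ctx->priority = I915_PRIORITY_NORMAL; */
ctx->sched.priority = I915_PRIORITY_NORMAL;	/* after */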
@@ -267,6 +256,34 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
return !ctx->file_priv;
}
+static inline struct intel_context *
+to_intel_context(struct i915_gem_context *ctx,
+ const struct intel_engine_cs *engine)
+{
+ return &ctx->__engine[engine->id];
+}
+
+static inline struct intel_ring *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ return engine->context_pin(engine, ctx);
+}
+
+static inline void __intel_context_pin(struct i915_gem_context *ctx,
+ const struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ GEM_BUG_ON(!ce->pin_count);
+ ce->pin_count++;
+}
+
+static inline void intel_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ engine->context_unpin(engine, ctx);
+}
+
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
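A hedged usage sketch of the new helpers: per-engine context state is reached through to_intel_context() rather than ctx->engine[id], and pinning returns the ring for the (ctx, engine) pair:

struct intel_ring *ring;

ring = intel_context_pin(ctx, engine);
if (IS_ERR(ring))
	return PTR_ERR(ring);

state = to_intel_context(ctx, engine)->state;
intel_context_unpin(ctx, engine);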
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0414228cd2b5..22df17c8ca9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -81,6 +81,35 @@ enum {
* but this remains just a hint as the kernel may choose a new location for
* any object in the future.
*
+ * At the level of talking to the hardware, submitting a batchbuffer for the
+ * GPU to execute amounts to adding commands to a buffer from which the HW
+ * command streamer reads.
+ *
+ * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
+ * Execlists, this command is not placed on the same buffer as the
+ * remaining items.
+ *
+ * 2. Add a command to invalidate caches to the buffer.
+ *
+ * 3. Add a batchbuffer start command to the buffer; the start command is
+ * essentially a token together with the GPU address of the batchbuffer
+ * to be executed.
+ *
+ * 4. Add a pipeline flush to the buffer.
+ *
+ * 5. Add a memory write command to the buffer to record when the GPU
+ * is done executing the batchbuffer. The memory write writes the
+ * global sequence number of the request, ``i915_request::global_seqno``;
+ * the i915 driver uses the value last written to that memory location
+ * to determine if the GPU has completed the batchbuffer.
+ *
+ * 6. Add a user interrupt command to the buffer. This command instructs
+ * the GPU to issue an interrupt when the command, pipeline flush and
+ * memory write are completed.
+ *
+ * 7. Inform the hardware of the additional commands added to the buffer
+ * (by updating the tail pointer).
+ *
* Processing an execbuf ioctl is conceptually split up into a few phases.
*
* 1. Validation - Ensure all the pointers, handles and flags are valid.
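A hedged sketch of steps 2-7 from the list above, for a hypothetical emitter; begin_ring_emission()/finish_ring_emission() and the *_cmd values are illustrative, and the real MI_* encodings vary by gen:

u32 *cs = begin_ring_emission(ring, 8);

*cs++ = cache_invalidate_cmd;		/* 2: invalidate caches */
*cs++ = MI_BATCH_BUFFER_START;		/* 3: token ... */
*cs++ = batch_gpu_addr;			/*    ... plus batch address */
*cs++ = pipeline_flush_cmd;		/* 4: pipeline flush */
*cs++ = MI_STORE_DWORD_IMM;		/* 5: write global_seqno ... */
*cs++ = hwsp_seqno_addr;		/*    ... into the status page */
*cs++ = rq_global_seqno;
*cs++ = MI_USER_INTERRUPT;		/* 6: interrupt on completion */

finish_ring_emission(ring, cs);		/* 7: publish the new tail */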
@@ -460,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
}
static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+ unsigned int i, unsigned batch_idx,
+ struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
@@ -493,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
eb->flags[i] = entry->flags;
vma->exec_flags = &eb->flags[i];
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ if (i == batch_idx) {
+ if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ if (eb->reloc_cache.has_fence)
+ eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+ eb->batch = vma;
+ }
+
err = 0;
if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
@@ -687,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *obj;
- unsigned int i;
+ unsigned int i, batch;
int err;
if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -699,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
+ batch = eb_batch_index(eb);
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -733,40 +784,24 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
/* transfer ref to ctx */
- vma->open_count++;
+ if (!vma->open_count++)
+ i915_vma_reopen(vma);
list_add(&lut->obj_link, &obj->lut_list);
list_add(&lut->ctx_link, &eb->ctx->handles_list);
lut->ctx = eb->ctx;
lut->handle = handle;
add_vma:
- err = eb_add_vma(eb, i, vma);
+ err = eb_add_vma(eb, i, batch, vma);
if (unlikely(err))
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
- /* take note of the batch buffer before we might reorder the lists */
- i = eb_batch_index(eb);
- eb->batch = eb->vma[i];
- GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
- if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 21d72f695adb..996ab2ad6c45 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -110,7 +110,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma);
static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
- /* Note that as an uncached mmio write, this should flush the
+ /*
+ * Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -1161,6 +1162,27 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * We write all 4K page entries, even when using 64K
+ * pages. In order to verify that the HW isn't cheating
+ * by using the 4K PTE instead of the 64K PTE, we want
+ * to remove all the surplus entries. If the HW skipped
+ * the 64K PTE, it will read/write into the scratch page
+ * instead - which we detect as missing results during
+ * selftests.
+ */
+ if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ u16 i;
+
+ encode = pte_encode | vma->vm->scratch_page.daddr;
+ vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
+
+ for (i = 1; i < index; i += 16)
+ memset64(vaddr + i, encode, 15);
+
+ kunmap_atomic(vaddr);
+ }
}
vma->page_sizes.gtt |= page_size;
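The scrub above leans on the 4K/64K layout: one 64K page spans 16 consecutive 4K PTE slots, so keeping slot 0 of each group and pointing slots 1-15 at scratch makes any HW fallback to 4K accesses land in the scratch page, where the selftests can detect it. The stride, in isolation:

/* One 64K page = 16 x 4K PTEs: keep entry 0, scrub entries 1-15. */
for (i = 1; i < nents; i += 16)
	memset64(vaddr + i, scratch_pte, 15);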
@@ -2111,8 +2133,6 @@ static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv,
const char *name)
{
- i915_gem_timeline_init(dev_priv, &vm->timeline, name);
-
drm_mm_init(&vm->mm, 0, vm->total);
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
@@ -2129,7 +2149,6 @@ static void i915_address_space_fini(struct i915_address_space *vm)
if (pagevec_count(&vm->free_pages))
vm_free_pages_release(vm, true);
- i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
@@ -2140,15 +2159,15 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(dev_priv) >= 9)
+ I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
* To support 64K PTEs we need to first enable the use of the
@@ -2222,6 +2241,12 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
void i915_ppgtt_close(struct i915_address_space *vm)
{
+ GEM_BUG_ON(vm->closed);
+ vm->closed = true;
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
+{
struct list_head *phases[] = {
&vm->active_list,
&vm->inactive_list,
@@ -2229,15 +2254,12 @@ void i915_ppgtt_close(struct i915_address_space *vm)
NULL,
}, **phase;
- GEM_BUG_ON(vm->closed);
vm->closed = true;
-
for (phase = phases; *phase; phase++) {
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, *phase, vm_link)
- if (!i915_vma_is_closed(vma))
- i915_vma_close(vma);
+ i915_vma_destroy(vma);
}
}
@@ -2248,7 +2270,8 @@ void i915_ppgtt_release(struct kref *kref)
trace_i915_ppgtt_release(&ppgtt->base);
- /* vmas should already be unbound and destroyed */
+ ppgtt_destroy_vma(&ppgtt->base);
+
GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
@@ -2417,11 +2440,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
- wmb();
-
- /* This next bit makes the above posting read even more important. We
- * want to flush the TLBs only after we're certain all the PTE updates
- * have finished.
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
*/
ggtt->invalidate(vm->i915);
}
@@ -2459,11 +2480,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
for_each_sgt_dma(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
- wmb();
- /* This next bit makes the above posting read even more important. We
- * want to flush the TLBs only after we're certain all the PTE updates
- * have finished.
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
*/
ggtt->invalidate(vm->i915);
}
@@ -3325,14 +3345,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- if (INTEL_GEN(dev_priv) >= 9) {
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
size = chv_get_total_gtt_size(snb_gmch_ctl);
- } else {
+ else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- }
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
ggtt->base.cleanup = gen6_gmch_remove;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 6efc017e8bb3..aec4f73574f4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,10 +38,9 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
-#include "i915_gem_timeline.h"
-
#include "i915_request.h"
#include "i915_selftest.h"
+#include "i915_timeline.h"
#define I915_GTT_PAGE_SIZE_4K BIT(12)
#define I915_GTT_PAGE_SIZE_64K BIT(16)
@@ -257,7 +256,6 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
- struct i915_gem_timeline timeline;
struct drm_i915_private *i915;
struct device *dma;
/* Every address space belongs to a struct file - except for the global
@@ -344,6 +342,7 @@ struct i915_address_space {
void (*clear_pages)(struct i915_vma *vma);
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+ I915_SELFTEST_DECLARE(bool scrub_64K);
};
#define i915_is_ggtt(V) (!(V)->file)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62aa67960bf4..ad949cc30928 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -51,6 +51,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
+ /* WaSkipStolenMemoryFirstPage:bdw+ */
+ if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+ start = 4096;
+
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
size, alignment, 0,
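With this clamp, WaSkipStolenMemoryFirstPage is applied per allocation instead of by offsetting the whole allocator, which is what the i915_gem_init_stolen() hunk further down removes. A hedged caller sketch (parameter order assumed from the call above):

/* On gen8+, a request starting below 4096 is transparently moved up. */
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
					   alignment, 0, U64_MAX);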
@@ -121,8 +125,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
- DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
+ DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
+ DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
}
}
@@ -174,18 +178,19 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- resource_size_t *base, resource_size_t *size)
+ resource_size_t *base,
+ resource_size_t *size)
{
- uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
- CTG_STOLEN_RESERVED :
- ELK_STOLEN_RESERVED);
+ u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
+ CTG_STOLEN_RESERVED :
+ ELK_STOLEN_RESERVED);
resource_size_t stolen_top = dev_priv->dsm.end + 1;
- if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
- *base = 0;
- *size = 0;
+ DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
+ IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+
+ if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
- }
/*
* Whether ILK really reuses the ELK register for this is unclear.
@@ -193,30 +198,25 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
*/
WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
- *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
+ if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
+ return;
+ *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
- /* On these platforms, the register doesn't have a size field, so the
- * size is the distance between the base and the top of the stolen
- * memory. We also have the genuine case where base is zero and there's
- * nothing reserved. */
- if (*base == 0)
- *size = 0;
- else
- *size = stolen_top - *base;
+ *size = stolen_top - *base;
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- resource_size_t *base, resource_size_t *size)
+ resource_size_t *base,
+ resource_size_t *size)
{
- uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
- if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
- *base = 0;
- *size = 0;
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
- }
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -239,17 +239,44 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- resource_size_t *base, resource_size_t *size)
+static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
{
- uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = dev_priv->dsm.end + 1;
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
- if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
- *base = 0;
- *size = 0;
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
+
+ switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+ default:
+ MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ case GEN7_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
}
+ /*
+ * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
+ * reserved location as (top - size).
+ */
+ *base = stolen_top - *size;
+}
+
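A worked example of the (top - size) deduction above, with hypothetical values:

/* stolen_top = 0xB0000000, decoded size = 1MiB (0x100000)
 * => *base = 0xB0000000 - 0x100000 = 0xAFF00000 */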
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
+{
+ u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
+ return;
+
*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
@@ -266,15 +293,15 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
- resource_size_t *base, resource_size_t *size)
+ resource_size_t *base,
+ resource_size_t *size)
{
- uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
- *base = 0;
- *size = 0;
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
- }
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -298,36 +325,28 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- resource_size_t *base, resource_size_t *size)
+ resource_size_t *base,
+ resource_size_t *size)
{
- uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top;
+ u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = dev_priv->dsm.end + 1;
- if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
- *base = 0;
- *size = 0;
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
- }
- stolen_top = dev_priv->dsm.end + 1;
+ if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
+ return;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
-
- /* On these platforms, the register doesn't have a size field, so the
- * size is the distance between the base and the top of the stolen
- * memory. We also have the genuine case where base is zero and there's
- * nothing reserved. */
- if (*base == 0)
- *size = 0;
- else
- *size = stolen_top - *base;
+ *size = stolen_top - *base;
}
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
- resource_size_t stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
@@ -353,7 +372,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
stolen_top = dev_priv->dsm.end + 1;
- reserved_base = 0;
+ reserved_base = stolen_top;
reserved_size = 0;
switch (INTEL_GEN(dev_priv)) {
@@ -373,8 +392,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
&reserved_base, &reserved_size);
break;
case 7:
- gen7_get_stolen_reserved(dev_priv,
- &reserved_base, &reserved_size);
+ if (IS_VALLEYVIEW(dev_priv))
+ vlv_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
+ else
+ gen7_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
default:
if (IS_LP(dev_priv))
@@ -386,11 +409,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
break;
}
- /* It is possible for the reserved base to be zero, but the register
- * field for size doesn't have a zero option. */
- if (reserved_base == 0) {
- reserved_size = 0;
+ /*
+ * Our expectation is that the reserved space is at the top of the
+ * stolen region and *never* at the bottom. If we see !reserved_base,
+ * it likely means we failed to read the registers correctly.
+ */
+ if (!reserved_base) {
+ DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
+ &reserved_base, &reserved_size);
reserved_base = stolen_top;
+ reserved_size = 0;
}
dev_priv->dsm_reserved =
@@ -406,21 +434,15 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&dev_priv->dsm) >> 10,
- ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
-
- stolen_usable_start = 0;
- /* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(dev_priv) >= 8)
- stolen_usable_start = 4096;
+ DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&dev_priv->dsm) >> 10,
+ ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
dev_priv->stolen_usable_size =
- resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;
+ resource_size(&dev_priv->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
- dev_priv->stolen_usable_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
return 0;
}
@@ -580,8 +602,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
- &stolen_offset, &gtt_offset, &size);
+ DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
+ &stolen_offset, &gtt_offset, &size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0) ||
@@ -599,14 +621,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
- DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj == NULL) {
- DRM_DEBUG_KMS("failed to allocate stolen object\n");
+ DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
@@ -635,7 +657,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
size, gtt_offset, obj->cache_level,
0);
if (ret) {
- DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
goto err_pages;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
deleted file mode 100644
index e9fd87604067..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "i915_syncmap.h"
-
-static void __intel_timeline_init(struct intel_timeline *tl,
- struct i915_gem_timeline *parent,
- u64 context,
- struct lock_class_key *lockclass,
- const char *lockname)
-{
- tl->fence_context = context;
- tl->common = parent;
- spin_lock_init(&tl->lock);
- lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
- init_request_active(&tl->last_request, NULL);
- INIT_LIST_HEAD(&tl->requests);
- i915_syncmap_init(&tl->sync);
-}
-
-static void __intel_timeline_fini(struct intel_timeline *tl)
-{
- GEM_BUG_ON(!list_empty(&tl->requests));
-
- i915_syncmap_free(&tl->sync);
-}
-
-static int __i915_gem_timeline_init(struct drm_i915_private *i915,
- struct i915_gem_timeline *timeline,
- const char *name,
- struct lock_class_key *lockclass,
- const char *lockname)
-{
- unsigned int i;
- u64 fences;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- /*
- * Ideally we want a set of engines on a single leaf as we expect
- * to mostly be tracking synchronisation between engines. It is not
- * a huge issue if this is not the case, but we may want to mitigate
- * any page crossing penalties if they become an issue.
- */
- BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
-
- timeline->i915 = i915;
- timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
- if (!timeline->name)
- return -ENOMEM;
-
- list_add(&timeline->link, &i915->gt.timelines);
-
- /* Called during early_init before we know how many engines there are */
- fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
- for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
- __intel_timeline_init(&timeline->engine[i],
- timeline, fences++,
- lockclass, lockname);
-
- return 0;
-}
-
-int i915_gem_timeline_init(struct drm_i915_private *i915,
- struct i915_gem_timeline *timeline,
- const char *name)
-{
- static struct lock_class_key class;
-
- return __i915_gem_timeline_init(i915, timeline, name,
- &class, "&timeline->lock");
-}
-
-int i915_gem_timeline_init__global(struct drm_i915_private *i915)
-{
- static struct lock_class_key class;
-
- return __i915_gem_timeline_init(i915,
- &i915->gt.global_timeline,
- "[execution]",
- &class, "&global_timeline->lock");
-}
-
-/**
- * i915_gem_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_gem_timelines_park(struct drm_i915_private *i915)
-{
- struct i915_gem_timeline *timeline;
- int i;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
- struct intel_timeline *tl = &timeline->engine[i];
-
- /*
- * All known fences are completed so we can scrap
- * the current sync point tracking and start afresh,
- * any attempt to wait upon a previous sync point
- * will be skipped as the fence was signaled.
- */
- i915_syncmap_free(&tl->sync);
- }
- }
-}
-
-void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
-{
- int i;
-
- lockdep_assert_held(&timeline->i915->drm.struct_mutex);
-
- for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
- __intel_timeline_fini(&timeline->engine[i]);
-
- list_del(&timeline->link);
- kfree(timeline->name);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_gem_timeline.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f89ac7a8f95f..df234dc23274 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -32,6 +32,7 @@
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include "i915_gpu_error.h"
#include "i915_drv.h"
static inline const struct intel_engine_cs *
@@ -403,16 +404,17 @@ static const char *bannable(const struct drm_i915_error_context *ctx)
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
- const struct drm_i915_error_request *erq)
+ const struct drm_i915_error_request *erq,
+ const unsigned long epoch)
{
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno, erq->priority,
- jiffies_to_msecs(jiffies - erq->jiffies),
- erq->head, erq->tail);
+ erq->context, erq->seqno, erq->sched_attr.priority,
+ jiffies_to_msecs(erq->jiffies - epoch),
+ erq->start, erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -421,12 +423,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
{
err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->priority, ctx->ban_score, bannable(ctx),
+ ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee)
+ const struct drm_i915_error_engine *ee,
+ const unsigned long epoch)
{
int n;
@@ -496,14 +499,15 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
err_printf(m, " hangcheck action: %s\n",
hangcheck_action_to_str(ee->hangcheck_action));
- err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
+ err_printf(m, " hangcheck action timestamp: %dms (%lu%s)\n",
+ jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
ee->hangcheck_timestamp,
- jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+ ee->hangcheck_timestamp == epoch ? "; epoch" : "");
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
- error_print_request(m, " ", &ee->execlist[n]);
+ error_print_request(m, " ", &ee->execlist[n], epoch);
}
error_print_context(m, " Active context: ", &ee->context);
@@ -649,6 +653,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
ts = ktime_to_timespec64(error->uptime);
err_printf(m, "Uptime: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+ err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
+ err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
+ error->capture,
+ jiffies_to_msecs(jiffies - error->capture),
+ jiffies_to_msecs(error->capture - error->epoch));
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
if (error->engine[i].hangcheck_stalled &&
@@ -709,7 +718,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
if (error->engine[i].engine_id != -1)
- error_print_engine(m, &error->engine[i]);
+ error_print_engine(m, &error->engine[i], error->epoch);
}
for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
@@ -768,7 +777,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
dev_priv->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
- error_print_request(m, " ", &ee->requests[j]);
+ error_print_request(m, " ",
+ &ee->requests[j],
+ error->epoch);
}
if (IS_ERR(ee->waiters)) {
@@ -1277,10 +1288,11 @@ static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
- erq->priority = request->priotree.priority;
+ erq->sched_attr = request->sched.attr;
erq->ban_score = atomic_read(&request->ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
+ erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
@@ -1298,7 +1310,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline->requests, link)
+ list_for_each_entry_from(request, &engine->timeline.requests, link)
count++;
if (!count)
return;
@@ -1311,7 +1323,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline->requests, link) {
+ list_for_each_entry_from(request, &engine->timeline.requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1371,7 +1383,7 @@ static void record_context(struct drm_i915_error_context *e,
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
- e->priority = ctx->priority;
+ e->sched_attr = ctx->sched;
e->ban_score = atomic_read(&ctx->ban_score);
e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
@@ -1471,7 +1483,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- request->ctx->engine[i].state);
+ to_intel_context(request->ctx,
+ engine)->state);
error->simulated |=
i915_gem_context_no_error_capture(request->ctx);
@@ -1734,6 +1747,22 @@ static void capture_params(struct i915_gpu_state *error)
#undef DUP
}
+static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
+{
+ unsigned long epoch = error->capture;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ const struct drm_i915_error_engine *ee = &error->engine[i];
+
+ if (ee->hangcheck_stalled &&
+ time_before(ee->hangcheck_timestamp, epoch))
+ epoch = ee->hangcheck_timestamp;
+ }
+
+ return epoch;
+}
+
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@@ -1742,6 +1771,7 @@ static int capture(void *data)
error->boottime = ktime_get_boottime();
error->uptime = ktime_sub(ktime_get(),
error->i915->gt.last_init_time);
+ error->capture = jiffies;
capture_params(error);
capture_gen_state(error);
@@ -1755,6 +1785,8 @@ static int capture(void *data)
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
+ error->epoch = capture_find_epoch(error);
+
return 0;
}
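With both capture and epoch recorded, every timestamp in the error report can be rebased onto the first hang rather than printed as "ms ago"; the conversion the printers above rely on is simply:

/* Report times as offsets from the first hang (the epoch). */
msecs = jiffies_to_msecs(timestamp - error->epoch);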
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
new file mode 100644
index 000000000000..dac0f8c4c1cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -0,0 +1,366 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef _I915_GPU_ERROR_H_
+#define _I915_GPU_ERROR_H_
+
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
+#include <drm/drm_mm.h>
+
+#include "intel_device_info.h"
+#include "intel_ringbuffer.h"
+#include "intel_uc_fw.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_params.h"
+#include "i915_scheduler.h"
+
+struct drm_i915_private;
+struct intel_overlay_error_state;
+struct intel_display_error_state;
+
+struct i915_gpu_state {
+ struct kref ref;
+ ktime_t time;
+ ktime_t boottime;
+ ktime_t uptime;
+ unsigned long capture;
+ unsigned long epoch;
+
+ struct drm_i915_private *i915;
+
+ char error_msg[128];
+ bool simulated;
+ bool awake;
+ bool wakelock;
+ bool suspended;
+ int iommu;
+ u32 reset_count;
+ u32 suspend_count;
+ struct intel_device_info device_info;
+ struct intel_driver_caps driver_caps;
+ struct i915_params params;
+
+ struct i915_error_uc {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct drm_i915_error_object *guc_log;
+ } uc;
+
+ /* Generic register state */
+ u32 eir;
+ u32 pgtbl_er;
+ u32 ier;
+ u32 gtier[4], ngtier;
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
+ u32 done_reg;
+ u32 gac_eco;
+ u32 gam_ecochk;
+ u32 gab_ctl;
+ u32 gfx_mode;
+
+ u32 nfence;
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+
+ struct drm_i915_error_engine {
+ int engine_id;
+ /* Software tracked state */
+ bool idle;
+ bool waiting;
+ int num_waiters;
+ unsigned long hangcheck_timestamp;
+ bool hangcheck_stalled;
+ enum intel_engine_hangcheck_action hangcheck_action;
+ struct i915_address_space *vm;
+ int num_requests;
+ u32 reset_count;
+
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head;
+ u32 cpu_ring_tail;
+
+ u32 last_seqno;
+
+ /* Register state */
+ u32 start;
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 mode;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u32 seqno;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+ struct intel_instdone instdone;
+
+ struct drm_i915_error_context {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 handle;
+ u32 hw_id;
+ int ban_score;
+ int active;
+ int guilty;
+ bool bannable;
+ struct i915_sched_attr sched_attr;
+ } context;
+
+ struct drm_i915_error_object {
+ u64 gtt_offset;
+ u64 gtt_size;
+ int page_count;
+ int unused;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+ struct drm_i915_error_object **user_bo;
+ long user_bo_count;
+
+ struct drm_i915_error_object *wa_ctx;
+ struct drm_i915_error_object *default_state;
+
+ struct drm_i915_error_request {
+ long jiffies;
+ pid_t pid;
+ u32 context;
+ int ban_score;
+ u32 seqno;
+ u32 start;
+ u32 head;
+ u32 tail;
+ struct i915_sched_attr sched_attr;
+ } *requests, execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
+
+ struct drm_i915_error_waiter {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 seqno;
+ } *waiters;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+ } engine[I915_NUM_ENGINES];
+
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u64 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ u32 userptr:1;
+ s32 engine:4;
+ u32 cache_level:3;
+ } *active_bo[I915_NUM_ENGINES], *pinned_bo;
+ u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
+ struct i915_address_space *active_vm[I915_NUM_ENGINES];
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+
+ struct delayed_work hangcheck_work;
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct i915_gpu_state *first_error;
+
+ atomic_t pending_fb_pin;
+
+ unsigned long missed_irq_rings;
+
+ /**
+ * State variable controlling the reset flow and count
+ *
+ * This is a counter which gets incremented when reset is triggered.
+ *
+ * Before the reset commences, the I915_RESET_BACKOFF bit is set
+ * meaning that any waiters holding onto the struct_mutex should
+ * relinquish the lock immediately in order for the reset to start.
+ *
+ * If reset is not completed successfully, the I915_WEDGED bit is
+ * set, meaning that the hardware is terminally sour and there is no
+ * recovery. All waiters on the reset_queue will be woken when
+ * that happens.
+ *
+ * This counter is used by the wait_seqno code to notice that a reset
+ * event happened and that it needs to restart the entire ioctl (since
+ * most likely the seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ */
+ unsigned long reset_count;
+
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
+ * other users acquiring the struct_mutex. To do this we set the
+ * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
+ * and then check for that bit before acquiring the struct_mutex (in
+ * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
+ * secondary role in preventing two concurrent global reset attempts.
+ *
+ * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
+ * struct_mutex. We try to acquire the struct_mutex in the reset worker,
+ * but it may be held by some long running waiter (that we cannot
+ * interrupt without causing trouble). Once we are ready to do the GPU
+ * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
+ * they already hold the struct_mutex and want to participate they can
+ * inspect the bit and do the reset directly, otherwise the worker
+ * waits for the struct_mutex.
+ *
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ */
+ unsigned long flags;
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_HANDOFF 1
+#define I915_RESET_MODESET 2
+#define I915_WEDGED (BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
+
+ /** Number of times an engine has been reset */
+ u32 reset_engine_count[I915_NUM_ENGINES];
+
+ /** Set of stalled engines with guilty requests, in the current reset */
+ u32 stalled_mask;
+
+ /** Reason for the current *global* reset */
+ const char *reason;
+
+ /**
+ * Waitqueue to signal when a hang is detected. Used for waiters
+ * to release the struct_mutex for the reset to proceed.
+ */
+ wait_queue_head_t wait_queue;
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for dev_priv->mm.wedged to settle.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* For missed irq/seqno simulation. */
+ unsigned long test_irq_rings;
+};
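The low bits of error->flags act as state bits, while I915_RESET_ENGINE carves out one lock bit per engine from the top of the word. A short sketch of how a per-engine slot is claimed and released, mirroring the i915_handle_error() hunks later in this patch (illustrative, error paths elided):

	/* claim the reset slot for this engine before resetting it */
	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
			     &dev_priv->gpu_error.flags))
		return;	/* an engine reset is already in flight */

	if (i915_reset_engine(engine, msg) == 0)
		engine_mask &= ~intel_engine_flag(engine);

	clear_bit(I915_RESET_ENGINE + engine->id,
		  &dev_priv->gpu_error.flags);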
+
+struct drm_i915_error_state_buf {
+ struct drm_i915_private *i915;
+ unsigned int bytes;
+ unsigned int size;
+ int err;
+ u8 *buf;
+ loff_t start;
+ loff_t pos;
+};
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+ const struct i915_gpu_state *gpu);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+ struct drm_i915_private *i915,
+ size_t count, loff_t pos);
+
+static inline void
+i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
+{
+ kfree(eb->buf);
+}
+
+struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg);
+
+static inline struct i915_gpu_state *
+i915_gpu_state_get(struct i915_gpu_state *gpu)
+{
+ kref_get(&gpu->ref);
+ return gpu;
+}
+
+void __i915_gpu_state_free(struct kref *kref);
+static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+{
+ if (gpu)
+ kref_put(&gpu->ref, __i915_gpu_state_free);
+}
+
+struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+void i915_reset_error_state(struct drm_i915_private *i915);
+
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg)
+{
+}
+
+static inline struct i915_gpu_state *
+i915_first_error_state(struct drm_i915_private *i915)
+{
+ return NULL;
+}
+
+static inline void i915_reset_error_state(struct drm_i915_private *i915)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
+
+#endif /* _I915_GPU_ERROR_H_ */
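As the kref helpers above suggest, every reader of a captured state is expected to hold a reference for as long as it dereferences the struct. A hedged sketch of a debugfs-style consumer, assuming i915_first_error_state() returns with a reference already held (buffer sizing and the copy-out step are illustrative):

	struct i915_gpu_state *gpu;

	gpu = i915_first_error_state(i915);
	if (gpu) {
		struct drm_i915_error_state_buf str;

		if (i915_error_state_buf_init(&str, i915, SZ_4K, 0) == 0) {
			i915_error_state_to_str(&str, gpu);
			/* ... hand str.buf / str.bytes to the reader ... */
			i915_error_state_buf_release(&str);
		}
		i915_gpu_state_put(gpu);
	}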
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 633c18785c1e..4a02747ac658 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -243,6 +243,41 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
}
+static u32
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+ const unsigned int bank, const unsigned int bit);
+
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit)
+{
+ void __iomem * const regs = i915->regs;
+ u32 dw;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (dw & BIT(bit)) {
+ /*
+ * According to the BSpec, DW_IIR bits cannot be cleared without
+ * first servicing the Selector & Shared IIR registers.
+ */
+ gen11_gt_engine_identity(i915, bank, bit);
+
+ /*
+ * We locked GT INT DW by reading it. If we want to (try
+ * to) recover from this successfully, we need to clear
+ * our bit, otherwise we are locking the register for
+ * everybody.
+ */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+ return true;
+ }
+
+ return false;
+}
+
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
@@ -308,17 +343,29 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
+ WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
+
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_GPM_WGBOXPERF_INTR_MASK;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ return GEN8_GT_IMR(2);
+ else
+ return GEN6_PMIMR;
}
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ return GEN8_GT_IER(2);
+ else
+ return GEN6_PMIER;
}
/**
@@ -400,6 +447,18 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
 /* though a barrier is missing here, we don't really need one */
}
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+ ;
+
+ dev_priv->gt_pm.rps.pm_iir = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
@@ -415,12 +474,14 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
if (READ_ONCE(rps->interrupts_enabled))
return;
- if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
- return;
-
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON_ONCE(rps->pm_iir);
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+ else
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+
rps->interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
@@ -434,9 +495,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
if (!READ_ONCE(rps->interrupts_enabled))
return;
- if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
- return;
-
spin_lock_irq(&dev_priv->irq_lock);
rps->interrupts_enabled = false;
@@ -453,7 +511,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
* state of the worker can be discarded.
*/
cancel_work_sync(&rps->work);
- gen6_reset_rps_interrupts(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_reset_rps_interrupts(dev_priv);
+ else
+ gen6_reset_rps_interrupts(dev_priv);
}
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
@@ -1399,19 +1460,18 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
}
static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
- if (READ_ONCE(engine->execlists.active)) {
- __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- tasklet = true;
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
+ if (READ_ONCE(engine->execlists.active))
+ tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted);
}
- if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
+ if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
tasklet |= USES_GUC_SUBMISSION(engine->i915);
}
@@ -1466,21 +1526,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
gen8_cs_irq_handler(i915->engine[RCS],
- gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+ gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(i915->engine[BCS],
- gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+ gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
gen8_cs_irq_handler(i915->engine[VCS],
- gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+ gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(i915->engine[VCS2],
- gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+ gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
gen8_cs_irq_handler(i915->engine[VECS],
- gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+ gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
@@ -1627,7 +1687,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
int head, tail;
spin_lock(&pipe_crc->lock);
- if (pipe_crc->source) {
+ if (pipe_crc->source && !crtc->base.crc.opened) {
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
DRM_DEBUG_KMS("spurious interrupt\n");
@@ -1667,7 +1727,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* On GEN8+ sometimes the second CRC is bonkers as well, so
* don't trust that one either.
*/
- if (pipe_crc->skipped == 0 ||
+ if (pipe_crc->skipped <= 0 ||
(INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
@@ -1766,37 +1826,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
- if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
- /* Sample the log buffer flush related bits & clear them out now
- * itself from the message identity register to minimize the
- * probability of losing a flush interrupt, when there are back
- * to back flush interrupts.
- * There can be a new flush interrupt, for different log buffer
- * type (like for ISR), whilst Host is handling one (for DPC).
- * Since same bit is used in message register for ISR & DPC, it
- * could happen that GuC sets the bit for 2nd interrupt but Host
- * clears out the bit on handling the 1st interrupt.
- */
- u32 msg, flush;
-
- msg = I915_READ(SOFT_SCRATCH(15));
- flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
- if (flush) {
- /* Clear the message bits that are handled */
- I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
-
- /* Handle flush interrupt in bottom half */
- queue_work(dev_priv->guc.log.runtime.flush_wq,
- &dev_priv->guc.log.runtime.flush_work);
-
- dev_priv->guc.log.flush_interrupt_count++;
- } else {
- /* Not clearing of unhandled event bits won't result in
- * re-triggering of the interrupt.
- */
- }
- }
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
+ intel_guc_to_host_event_handler(&dev_priv->guc);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
@@ -1862,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
/*
* Clear the PIPE*STAT regs before the IIR
+ *
+ * Toggle the enable bits to make sure we get an
+ * edge in the ISR pipe event bit if we don't clear
+ * all the enabled status bits. Otherwise the edge
+ * triggered IIR on i965/g4x wouldn't notice that
+ * an interrupt is still pending.
*/
- if (pipe_stats[pipe])
- I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+ if (pipe_stats[pipe]) {
+ I915_WRITE(reg, pipe_stats[pipe]);
+ I915_WRITE(reg, enable_mask);
+ }
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -2433,6 +2472,13 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
+ if (de_iir & DE_EDP_PSR_INT_HSW) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ }
+
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev_priv);
@@ -2562,11 +2608,25 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
+ bool found = false;
+
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
- if (iir & GEN8_DE_MISC_GSE)
+
+ if (iir & GEN8_DE_MISC_GSE) {
intel_opregion_asle_intr(dev_priv);
- else
+ found = true;
+ }
+
+ if (iir & GEN8_DE_EDP_PSR) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ found = true;
+ }
+
+ if (!found)
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
else
@@ -2762,58 +2822,16 @@ static void __fini_wedge(struct wedge_me *w)
(W)->i915; \
__fini_wedge((W)))
-static __always_inline void
-gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
-{
- gen8_cs_irq_handler(engine, iir, 0);
-}
-
-static void
-gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int engine_n,
- const u16 iir)
-{
- struct intel_engine_cs ** const engine = i915->engine;
-
- switch (bank) {
- case 0:
- switch (engine_n) {
-
- case GEN11_RCS0:
- return gen11_cs_irq_handler(engine[RCS], iir);
-
- case GEN11_BCS:
- return gen11_cs_irq_handler(engine[BCS], iir);
- }
- case 1:
- switch (engine_n) {
-
- case GEN11_VCS(0):
- return gen11_cs_irq_handler(engine[_VCS(0)], iir);
- case GEN11_VCS(1):
- return gen11_cs_irq_handler(engine[_VCS(1)], iir);
- case GEN11_VCS(2):
- return gen11_cs_irq_handler(engine[_VCS(2)], iir);
- case GEN11_VCS(3):
- return gen11_cs_irq_handler(engine[_VCS(3)], iir);
-
- case GEN11_VECS(0):
- return gen11_cs_irq_handler(engine[_VECS(0)], iir);
- case GEN11_VECS(1):
- return gen11_cs_irq_handler(engine[_VECS(1)], iir);
- }
- }
-}
-
static u32
-gen11_gt_engine_intr(struct drm_i915_private * const i915,
- const unsigned int bank, const unsigned int bit)
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+ const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = i915->regs;
u32 timeout_ts;
u32 ident;
+ lockdep_assert_held(&i915->irq_lock);
+
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
/*
@@ -2835,42 +2853,101 @@ gen11_gt_engine_intr(struct drm_i915_private * const i915,
raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
GEN11_INTR_DATA_VALID);
- return ident & GEN11_INTR_ENGINE_MASK;
+ return ident;
}
static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
- const u32 master_ctl)
+gen11_other_irq_handler(struct drm_i915_private * const i915,
+ const u8 instance, const u16 iir)
+{
+ if (instance == OTHER_GTPM_INSTANCE)
+ return gen6_rps_irq_handler(i915, iir);
+
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct drm_i915_private * const i915,
+ const u8 class, const u8 instance, const u16 iir)
+{
+ struct intel_engine_cs *engine;
+
+ if (instance <= MAX_ENGINE_INSTANCE)
+ engine = i915->engine_class[class][instance];
+ else
+ engine = NULL;
+
+ if (likely(engine))
+ return gen8_cs_irq_handler(engine, iir);
+
+ WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+ class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct drm_i915_private * const i915,
+ const u32 identity)
+{
+ const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+ const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+ const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+ if (unlikely(!intr))
+ return;
+
+ if (class <= COPY_ENGINE_CLASS)
+ return gen11_engine_irq_handler(i915, class, instance, intr);
+
+ if (class == OTHER_CLASS)
+ return gen11_other_irq_handler(i915, instance, intr);
+
+ WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+ class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct drm_i915_private * const i915,
+ const unsigned int bank)
{
void __iomem * const regs = i915->regs;
- unsigned int bank;
+ unsigned long intr_dw;
+ unsigned int bit;
- for (bank = 0; bank < 2; bank++) {
- unsigned long intr_dw;
- unsigned int bit;
+ lockdep_assert_held(&i915->irq_lock);
- if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
- continue;
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (unlikely(!intr_dw)) {
+ DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+ return;
+ }
- if (unlikely(!intr_dw)) {
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
- continue;
- }
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u32 ident = gen11_gt_engine_identity(i915,
+ bank, bit);
- for_each_set_bit(bit, &intr_dw, 32) {
- const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+ gen11_gt_identity_handler(i915, ident);
+ }
- if (unlikely(!iir))
- continue;
+ /* Clear must be after the shared IIR has been serviced for the engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
- gen11_gt_engine_irq_handler(i915, bank, bit, iir);
- }
+static void
+gen11_gt_irq_handler(struct drm_i915_private * const i915,
+ const u32 master_ctl)
+{
+ unsigned int bank;
- /* Clear must be after shared has been served for engine */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+ spin_lock(&i915->irq_lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (master_ctl & GEN11_GT_DW_IRQ(bank))
+ gen11_gt_bank_handler(i915, bank);
}
+
+ spin_unlock(&i915->irq_lock);
}
static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -2912,15 +2989,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-/**
- * i915_reset_device - do process context error handling work
- * @dev_priv: i915 device private
- *
- * Fire an error uevent so userspace can see that a hang or error
- * was detected.
- */
-static void i915_reset_device(struct drm_i915_private *dev_priv)
+static void i915_reset_device(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *reason)
{
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2936,29 +3009,35 @@ static void i915_reset_device(struct drm_i915_private *dev_priv)
i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
intel_prepare_reset(dev_priv);
+ error->reason = reason;
+ error->stalled_mask = engine_mask;
+
/* Signal that locked waiters should reset the GPU */
- set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ smp_mb__before_atomic();
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ wake_up_all(&error->wait_queue);
/* Wait for anyone holding the lock to wakeup, without
* blocking indefinitely on struct_mutex.
*/
do {
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- i915_reset(dev_priv, 0);
+ i915_reset(dev_priv, engine_mask, reason);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
- } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+ } while (wait_on_bit_timeout(&error->flags,
I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE,
1));
+ error->stalled_mask = 0;
+ error->reason = NULL;
+
intel_finish_reset(dev_priv);
}
- if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- kobject_uevent_env(kobj,
- KOBJ_CHANGE, reset_done_event);
+ if (!test_bit(I915_WEDGED, &error->flags))
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
@@ -2990,6 +3069,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @flags: control flags
* @fmt: Error message format string
*
* Do some basic checking of register state at error time and
@@ -3000,16 +3080,23 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
+ unsigned long flags,
const char *fmt, ...)
{
struct intel_engine_cs *engine;
unsigned int tmp;
- va_list args;
char error_msg[80];
+ char *msg = NULL;
- va_start(args, fmt);
- vscnprintf(error_msg, sizeof(error_msg), fmt, args);
- va_end(args);
+ if (fmt) {
+ va_list args;
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ msg = error_msg;
+ }
/*
* In most cases it's guaranteed that we get here with an RPM
@@ -3020,8 +3107,12 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
*/
intel_runtime_pm_get(dev_priv);
- i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_clear_error_registers(dev_priv);
+ engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
+
+ if (flags & I915_ERROR_CAPTURE) {
+ i915_capture_error_state(dev_priv, engine_mask, msg);
+ i915_clear_error_registers(dev_priv);
+ }
/*
* Try engine reset when available. We fall back to full reset if
@@ -3034,7 +3125,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
&dev_priv->gpu_error.flags))
continue;
- if (i915_reset_engine(engine, 0) == 0)
+ if (i915_reset_engine(engine, msg) == 0)
engine_mask &= ~intel_engine_flag(engine);
clear_bit(I915_RESET_ENGINE + engine->id,
@@ -3064,7 +3155,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
TASK_UNINTERRUPTIBLE);
}
- i915_reset_device(dev_priv);
+ i915_reset_device(dev_priv, engine_mask, msg);
for_each_engine(engine, dev_priv, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
@@ -3286,6 +3377,11 @@ static void ironlake_irq_reset(struct drm_device *dev)
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+ if (IS_HASWELL(dev_priv)) {
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ }
+
gen5_gt_irq_reset(dev_priv);
ibx_irq_reset(dev_priv);
@@ -3324,6 +3420,9 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
@@ -3349,6 +3448,9 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
+
+ I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
static void gen11_irq_reset(struct drm_device *dev)
@@ -3697,6 +3799,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
DE_DP_A_HOTPLUG);
}
+ if (IS_HASWELL(dev_priv)) {
+ gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ display_mask |= DE_EDP_PSR_INT_HSW;
+ }
+
dev_priv->irq_mask = ~display_mask;
ibx_irq_pre_postinstall(dev);
@@ -3807,7 +3915,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
- u32 de_misc_masked = GEN8_DE_MISC_GSE;
+ u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 9) {
@@ -3832,6 +3940,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
+ gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
for_each_pipe(dev_priv, pipe) {
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
@@ -3887,7 +3998,14 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
- dev_priv->pm_imr = 0xffffffff; /* TODO */
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
+ I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
static int gen11_irq_postinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
new file mode 100644
index 000000000000..a5667926e3de
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -0,0 +1,118 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_icl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x0000ffff },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x0000ffff },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x0000ffff },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0xd04), 0x00000200 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x10060000 },
+ { _MMIO(0x9888), 0x22060000 },
+ { _MMIO(0x9888), 0x16060000 },
+ { _MMIO(0x9888), 0x24060000 },
+ { _MMIO(0x9888), 0x18060000 },
+ { _MMIO(0x9888), 0x1a060000 },
+ { _MMIO(0x9888), 0x12060000 },
+ { _MMIO(0x9888), 0x14060000 },
+ { _MMIO(0x9888), 0x10060000 },
+ { _MMIO(0x9888), 0x22060000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x16130000 },
+ { _MMIO(0x9888), 0x24000001 },
+ { _MMIO(0x9888), 0x0e130056 },
+ { _MMIO(0x9888), 0x10130000 },
+ { _MMIO(0x9888), 0x1a130000 },
+ { _MMIO(0x9888), 0x541f0001 },
+ { _MMIO(0x9888), 0x181f0000 },
+ { _MMIO(0x9888), 0x4c1f0000 },
+ { _MMIO(0x9888), 0x301f0000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
+ "a291665e-244b-4b76-9b9a-01de9d3c8068",
+ sizeof(dev_priv->perf.oa.test_config.uuid));
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
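The test config registered above is consumed through the existing i915-perf uAPI: userspace reads the metric set id from sysfs (here fixed at 1) and passes it when opening a stream. A hedged userspace sketch, assuming an open DRM fd and <drm/i915_drm.h>; the OA format and exponent are illustrative choices, not values required by this patch:

	uint64_t props[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* id from show_test_oa_id() */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 12,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)props,
	};
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);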
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
new file mode 100644
index 000000000000..ae1c24aafe4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_ICL_H__
+#define __I915_OA_ICL_H__
+
+extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 08108ce5be21..66ea3552c63e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -164,6 +164,9 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400,
i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
+i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
+ "DMC firmware path to use instead of the default one");
+
i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 430f5f9d0ff4..6684025b7af8 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -48,9 +48,10 @@ struct drm_printer;
param(int, enable_ips, 1) \
param(int, invert_brightness, 0) \
param(int, enable_guc, 0) \
- param(int, guc_log_level, 0) \
+ param(int, guc_log_level, -1) \
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
+ param(char *, dmc_firmware_path, NULL) \
param(int, mmio_debug, 0) \
param(int, edp_vswing, 0) \
param(int, reset, 2) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 062e91b39085..4364922e935d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -602,6 +602,7 @@ static const struct intel_device_info intel_icelake_11_info = {
PLATFORM(INTEL_ICELAKE),
.is_alpha_support = 1,
.has_resource_streamer = 0,
+ .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
};
#undef GEN
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index abaca6edeb71..019bd2d073ad 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -209,6 +209,7 @@
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
+#include "i915_oa_icl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1042,7 +1043,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
I915_WRITE(GEN7_OASTATUS2,
((head & GEN7_OASTATUS2_HEAD_MASK) |
- OA_MEM_SELECT_GGTT));
+ GEN7_OASTATUS2_MEM_SELECT_GGTT));
dev_priv->perf.oa.oa_buffer.head = head;
spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
@@ -1233,7 +1234,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*
* NB: implied RCS engine...
*/
- ring = engine->context_pin(engine, stream->ctx);
+ ring = intel_context_pin(stream->ctx, engine);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (IS_ERR(ring))
return PTR_ERR(ring);
@@ -1245,7 +1246,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* with gen8+ and execlists
*/
dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+ i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
}
return 0;
@@ -1270,7 +1271,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- engine->context_unpin(engine, stream->ctx);
+ intel_context_unpin(stream->ctx, engine);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1332,7 +1333,8 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
+ I915_WRITE(GEN7_OASTATUS2,
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
dev_priv->perf.oa.oa_buffer.head = gtt_offset;
I915_WRITE(GEN7_OABUFFER, gtt_offset);
@@ -1392,7 +1394,7 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* bit."
*/
I915_WRITE(GEN8_OABUFFER, gtt_offset |
- OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
@@ -1693,7 +1695,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct i915_gem_timeline *timeline;
+ struct i915_timeline *timeline;
struct i915_request *rq;
int ret;
@@ -1714,15 +1716,11 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
struct i915_request *prev;
- struct intel_timeline *tl;
- tl = &timeline->engine[engine->id];
- prev = i915_gem_active_raw(&tl->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- GFP_KERNEL);
+ i915_request_await_dma_fence(rq, &prev->fence);
}
i915_request_add(rq);
@@ -1757,6 +1755,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_gem_context *ctx;
int ret;
unsigned int wait_flags = I915_WAIT_LOCKED;
@@ -1787,7 +1786,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = &ctx->engine[RCS];
+ struct intel_context *ce = to_intel_context(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
@@ -1840,7 +1839,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+ if (IS_GEN(dev_priv, 9, 11)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -1870,7 +1869,6 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
-
}
static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
@@ -1885,6 +1883,13 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
+ struct i915_gem_context *ctx =
+ dev_priv->perf.oa.exclusive_stream->ctx;
+ u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
+ bool periodic = dev_priv->perf.oa.periodic;
+ u32 period_exponent = dev_priv->perf.oa.period_exponent;
+ u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+
/*
* Reset buf pointers so we don't forward reports from before now.
*
@@ -1896,25 +1901,14 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
*/
gen7_init_oa_buffer(dev_priv);
- if (dev_priv->perf.oa.exclusive_stream->enabled) {
- struct i915_gem_context *ctx =
- dev_priv->perf.oa.exclusive_stream->ctx;
- u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
-
- bool periodic = dev_priv->perf.oa.periodic;
- u32 period_exponent = dev_priv->perf.oa.period_exponent;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
-
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
- } else
- I915_WRITE(GEN7_OACONTROL, 0);
+ I915_WRITE(GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct drm_i915_private *dev_priv)
@@ -1966,11 +1960,19 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(dev_priv,
+ GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(dev_priv,
+ GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
}
/**
@@ -2099,13 +2101,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
- if (ret)
+ if (ret) {
+ DRM_DEBUG("Invalid context id to filter with\n");
return ret;
+ }
}
ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
- if (ret)
+ if (ret) {
+ DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
goto err_config;
+ }
/* PRM - observability performance counters:
*
@@ -2132,8 +2138,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
stream->oa_config);
- if (ret)
+ if (ret) {
+ DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
+ }
stream->ops = &i915_oa_stream_ops;
@@ -2745,7 +2753,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->ctx_handle = value;
break;
case DRM_I915_PERF_PROP_SAMPLE_OA:
- props->sample_flags |= SAMPLE_OA_REPORT;
+ if (value)
+ props->sample_flags |= SAMPLE_OA_REPORT;
break;
case DRM_I915_PERF_PROP_OA_METRICS_SET:
if (value == 0) {
@@ -2935,6 +2944,8 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_cflgt3(dev_priv);
} else if (IS_CANNONLAKE(dev_priv)) {
i915_perf_load_test_config_cnl(dev_priv);
+ } else if (IS_ICELAKE(dev_priv)) {
+ i915_perf_load_test_config_icl(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@@ -3292,6 +3303,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev_priv->perf.metrics_lock);
+ DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+
return oa_config->id;
sysfs_err:
@@ -3348,6 +3361,9 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
&oa_config->sysfs_metric);
idr_remove(&dev_priv->perf.metrics_idr, *arg);
+
+ DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
+
put_oa_config(dev_priv, oa_config);
config_err:
@@ -3467,7 +3483,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
- } else if (IS_GEN10(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 10, 11)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f0519e31543a..dc87797db500 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1,33 +1,12 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017-2018 Intel Corporation
*/
-#include <linux/perf_event.h>
-#include <linux/pm_runtime.h>
-
-#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
+#include "i915_drv.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index aa1b1a987ea1..2ba735299f7c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -1,29 +1,19 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017-2018 Intel Corporation
*/
+
#ifndef __I915_PMU_H__
#define __I915_PMU_H__
+#include <linux/hrtimer.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock_types.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8a69a9275e28..7720569f2024 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -153,9 +153,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f))
-#define _MMIO_PORT6_LN(port, ln, a0, a1, b, c, d, e, f) \
- _MMIO(_PICK(port, a0, b, c, d, e, f) + (ln * (a1 - a0)))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
@@ -191,6 +188,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OTHER_CLASS 4
#define MAX_ENGINE_CLASS 4
+#define OTHER_GTPM_INSTANCE 1
#define MAX_ENGINE_INSTANCE 3
/* PCI config space */
@@ -304,6 +302,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_GRDOM_VECS (1 << 4)
#define GEN9_GRDOM_GUC (1 << 5)
#define GEN8_GRDOM_MEDIA2 (1 << 7)
+/* GEN11 changed all bit defs except for FULL & RENDER */
+#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
+#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
+#define GEN11_GRDOM_BLT (1 << 2)
+#define GEN11_GRDOM_GUC (1 << 3)
+#define GEN11_GRDOM_MEDIA (1 << 5)
+#define GEN11_GRDOM_MEDIA2 (1 << 6)
+#define GEN11_GRDOM_MEDIA3 (1 << 7)
+#define GEN11_GRDOM_MEDIA4 (1 << 8)
+#define GEN11_GRDOM_VECS (1 << 13)
+#define GEN11_GRDOM_VECS2 (1 << 14)
#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
@@ -430,145 +439,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5
-/*
- * Instruction field definitions used by the command parser
- */
-#define INSTR_CLIENT_SHIFT 29
-#define INSTR_MI_CLIENT 0x0
-#define INSTR_BC_CLIENT 0x2
-#define INSTR_RC_CLIENT 0x3
-#define INSTR_SUBCLIENT_SHIFT 27
-#define INSTR_SUBCLIENT_MASK 0x18000000
-#define INSTR_MEDIA_SUBCLIENT 0x2
-#define INSTR_26_TO_24_MASK 0x7000000
-#define INSTR_26_TO_24_SHIFT 24
-
-/*
- * Memory interface instructions used by the kernel
- */
-#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
-/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
-#define MI_GLOBAL_GTT (1<<22)
-
-#define MI_NOOP MI_INSTR(0, 0)
-#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
-#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
-#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
-#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-#define MI_FLUSH MI_INSTR(0x04, 0)
-#define MI_READ_FLUSH (1 << 0)
-#define MI_EXE_FLUSH (1 << 1)
-#define MI_NO_WRITE_FLUSH (1 << 2)
-#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
-#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
-#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
-#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
-#define MI_ARB_ENABLE (1<<0)
-#define MI_ARB_DISABLE (0<<0)
-#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
-#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
-#define MI_SUSPEND_FLUSH_EN (1<<0)
-#define MI_SET_APPID MI_INSTR(0x0e, 0)
-#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
-#define MI_OVERLAY_CONTINUE (0x0<<21)
-#define MI_OVERLAY_ON (0x1<<21)
-#define MI_OVERLAY_OFF (0x2<<21)
-#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
-#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
-#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
-#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
-/* IVB has funny definitions for which plane to flip. */
-#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
-#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
-#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
-#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
-#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
-#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-/* SKL ones */
-#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
-#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
-#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
-#define MI_SEMAPHORE_UPDATE (1<<21)
-#define MI_SEMAPHORE_COMPARE (1<<20)
-#define MI_SEMAPHORE_REGISTER (1<<18)
-#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
-#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
-#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
-#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
-#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
-#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
-#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
-#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
-#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
-#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
-#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
-#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
-#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
-#define MI_SEMAPHORE_SYNC_MASK (3<<16)
-#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
-#define MI_MM_SPACE_GTT (1<<8)
-#define MI_MM_SPACE_PHYSICAL (0<<8)
-#define MI_SAVE_EXT_STATE_EN (1<<3)
-#define MI_RESTORE_EXT_STATE_EN (1<<2)
-#define MI_FORCE_RESTORE (1<<1)
-#define MI_RESTORE_INHIBIT (1<<0)
-#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
-#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
-#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
-#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
-#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
-#define MI_SEMAPHORE_POLL (1<<15)
-#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
-#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
-#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
-#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
-#define MI_USE_GGTT (1 << 22) /* g4x+ */
-#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
-#define MI_STORE_DWORD_INDEX_SHIFT 2
-/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
- * - Always issue an MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
- * simply ignores the register load under certain conditions.
- * - One can actually load arbitrarily many registers: simply issue x
- * address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
- */
-#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
-#define MI_LRI_FORCE_POSTED (1<<12)
-#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
-#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
-#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
-#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_FLUSH_DW_STORE_INDEX (1<<21)
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_FLUSH_DW_OP_STOREDW (1<<14)
-#define MI_FLUSH_DW_OP_MASK (3<<14)
-#define MI_FLUSH_DW_NOTIFY (1<<8)
-#define MI_INVALIDATE_BSD (1<<7)
-#define MI_FLUSH_DW_USE_GTT (1<<2)
-#define MI_FLUSH_DW_USE_PPGTT (0<<2)
-#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
-#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
-#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
-#define MI_BATCH_PPGTT_HSW (1<<8)
-#define MI_BATCH_NON_SECURE_HSW (1<<13)
-#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
-#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_BATCH_RESOURCE_STREAMER (1<<10)
-
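
Taken together with the MI_LOAD_REGISTER_IMM comment above, the emission pattern these opcodes imply is worth spelling out. A minimal sketch, assuming the intel_ring_begin()/intel_ring_advance() helpers; emit_lri(), reg[] and value[] are hypothetical caller-side names, not driver API:

  /* Hypothetical helper: emit one guarded LRI with up to 16 pairs. */
  static int emit_lri(struct i915_request *rq,
                      const i915_reg_t *reg, const u32 *value, int count)
  {
          u32 *cs;
          int i;

          GEM_BUG_ON(count < 1 || count > 16); /* x <= 2^4 must hold */

          cs = intel_ring_begin(rq, 2 * count + 2);
          if (IS_ERR(cs))
                  return PTR_ERR(cs);

          *cs++ = MI_NOOP; /* hw may ignore an unguarded register load */
          *cs++ = MI_LOAD_REGISTER_IMM(count);
          for (i = 0; i < count; i++) {
                  *cs++ = i915_mmio_reg_offset(reg[i]);
                  *cs++ = value[i];
          }

          intel_ring_advance(rq, cs);
          return 0;
  }
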
#define MI_PREDICATE_SRC0 _MMIO(0x2400)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
@@ -579,130 +449,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define LOWER_SLICE_DISABLED (0<<0)
/*
- * 3D instructions used by the kernel
- */
-#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
-
-#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
-#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
-#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-
-#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-#define BLT_WRITE_A (2<<20)
-#define BLT_WRITE_RGB (1<<20)
-#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
-#define BLT_DEPTH_8 (0<<24)
-#define BLT_DEPTH_16_565 (1<<24)
-#define BLT_DEPTH_16_1555 (2<<24)
-#define BLT_DEPTH_32 (3<<24)
-#define BLT_ROP_SRC_COPY (0xcc<<16)
-#define BLT_ROP_COLOR_COPY (0xf0<<16)
-#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
-#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-#define DISPLAY_PLANE_A (0<<20)
-#define DISPLAY_PLANE_B (1<<20)
-#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
-#define PIPE_CONTROL_FLUSH_L3 (1<<27)
-#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
-#define PIPE_CONTROL_MMIO_WRITE (1<<23)
-#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
-#define PIPE_CONTROL_CS_STALL (1<<20)
-#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
-#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
-#define PIPE_CONTROL_QW_WRITE (1<<14)
-#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
-#define PIPE_CONTROL_DEPTH_STALL (1<<13)
-#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
-#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
-#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
-#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
-#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
-#define PIPE_CONTROL_NOTIFY (1<<8)
-#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
-#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
-#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
-#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
-#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
-#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
-#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
-#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-
-/*
- * Commands used only by the command parser
- */
-#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
-#define MI_ARB_CHECK MI_INSTR(0x05, 0)
-#define MI_RS_CONTROL MI_INSTR(0x06, 0)
-#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
-#define MI_PREDICATE MI_INSTR(0x0C, 0)
-#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
-#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
-#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
-#define MI_URB_CLEAR MI_INSTR(0x19, 0)
-#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
-#define MI_CLFLUSH MI_INSTR(0x27, 0)
-#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
-#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
-#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
-#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
-#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
-#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
-#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
-#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
-#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
-#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
-#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
-#define GFX_OP_3DSTATE_SO_DECL_LIST \
- ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
-
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
- ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
-
-#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
-
-#define COLOR_BLT ((0x2<<29)|(0x40<<22))
-#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
-
-/*
* Registers used only by the command parser
*/
#define BCS_SWCTRL _MMIO(0x22200)
@@ -802,6 +548,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
+#define GEN8_OABUFFER_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
@@ -810,7 +557,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
#define GEN7_OASTATUS2 _MMIO(0x2368)
-#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
+#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
+#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
@@ -832,8 +580,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6<<3)
#define OABUFFER_SIZE_16M (7<<3)
-#define OA_MEM_SELECT_GGTT (1<<0)
-
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
@@ -1127,6 +873,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
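
The gen11 field is three bits wide where gen9 used one, adding the 38.4 and 25 MHz crystals. Decoding it is a straight mask-and-shift; a sketch, where the surrounding function and freq_khz are illustrative context (RPM_CONFIG0 itself is defined elsewhere in this file):

  u32 config = I915_READ(RPM_CONFIG0);
  u32 freq_khz = 0;

  switch ((config & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
          GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) {
  case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
          freq_khz = 24000;
          break;
  case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
          freq_khz = 19200;
          break;
  case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
          freq_khz = 38400;
          break;
  case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
          freq_khz = 25000;
          break;
  }
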
#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
@@ -1948,79 +1700,100 @@ enum i915_power_well_id {
#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
-#define CNL_PORT_PCS_DW1_GRP(port) _MMIO_PORT6(port, \
+#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_B, \
_CNL_PORT_PCS_DW1_GRP_C, \
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
- _CNL_PORT_PCS_DW1_GRP_F)
-#define CNL_PORT_PCS_DW1_LN0(port) _MMIO_PORT6(port, \
+ _CNL_PORT_PCS_DW1_GRP_F))
+
+#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
_CNL_PORT_PCS_DW1_LN0_C, \
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
- _CNL_PORT_PCS_DW1_LN0_F)
+ _CNL_PORT_PCS_DW1_LN0_F))
+#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
+#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
+#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
+#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
+#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
+ _ICL_PORT_PCS_DW1_GRP_A, \
+ _ICL_PORT_PCS_DW1_GRP_B)
+#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
+ _ICL_PORT_PCS_DW1_LN0_A, \
+ _ICL_PORT_PCS_DW1_LN0_B)
#define COMMON_KEEPER_EN (1 << 26)
-#define _CNL_PORT_TX_DW2_GRP_AE 0x162348
-#define _CNL_PORT_TX_DW2_GRP_B 0x1623C8
-#define _CNL_PORT_TX_DW2_GRP_C 0x162B48
-#define _CNL_PORT_TX_DW2_GRP_D 0x162BC8
-#define _CNL_PORT_TX_DW2_GRP_F 0x162A48
-#define _CNL_PORT_TX_DW2_LN0_AE 0x162448
-#define _CNL_PORT_TX_DW2_LN0_B 0x162648
-#define _CNL_PORT_TX_DW2_LN0_C 0x162C48
-#define _CNL_PORT_TX_DW2_LN0_D 0x162E48
-#define _CNL_PORT_TX_DW2_LN0_F 0x162848
-#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW2_GRP_AE, \
- _CNL_PORT_TX_DW2_GRP_B, \
- _CNL_PORT_TX_DW2_GRP_C, \
- _CNL_PORT_TX_DW2_GRP_D, \
- _CNL_PORT_TX_DW2_GRP_AE, \
- _CNL_PORT_TX_DW2_GRP_F)
-#define CNL_PORT_TX_DW2_LN0(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW2_LN0_AE, \
- _CNL_PORT_TX_DW2_LN0_B, \
- _CNL_PORT_TX_DW2_LN0_C, \
- _CNL_PORT_TX_DW2_LN0_D, \
- _CNL_PORT_TX_DW2_LN0_AE, \
- _CNL_PORT_TX_DW2_LN0_F)
-#define SWING_SEL_UPPER(x) ((x >> 3) << 15)
+/* CNL Port TX registers */
+#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
+#define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0
+#define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40
+#define _CNL_PORT_TX_D_GRP_OFFSET 0x162BC0
+#define _CNL_PORT_TX_F_GRP_OFFSET 0x162A40
+#define _CNL_PORT_TX_AE_LN0_OFFSET 0x162440
+#define _CNL_PORT_TX_B_LN0_OFFSET 0x162640
+#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
+#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
+#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
+#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \
+ _CNL_PORT_TX_AE_GRP_OFFSET, \
+ _CNL_PORT_TX_B_GRP_OFFSET, \
+ _CNL_PORT_TX_C_GRP_OFFSET, \
+ _CNL_PORT_TX_D_GRP_OFFSET, \
+ _CNL_PORT_TX_AE_GRP_OFFSET, \
+ _CNL_PORT_TX_F_GRP_OFFSET) + \
+ 4*(dw))
+#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
+ _CNL_PORT_TX_AE_LN0_OFFSET, \
+ _CNL_PORT_TX_B_LN0_OFFSET, \
+ _CNL_PORT_TX_C_LN0_OFFSET, \
+ _CNL_PORT_TX_D_LN0_OFFSET, \
+ _CNL_PORT_TX_AE_LN0_OFFSET, \
+ _CNL_PORT_TX_F_LN0_OFFSET) + \
+ 4*(dw))
+
+#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
+#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
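
The table-plus-stride form replaces the per-dword, per-port constants with two base lookups plus 4*(dw). The arithmetic can be spot-checked against the constants being deleted; for instance (the WARN_ONs are purely illustrative, not proposed driver code):

  /* PORT_B, dword 2: 0x162640 + 4*2, the old _CNL_PORT_TX_DW2_LN0_B. */
  WARN_ON(_CNL_PORT_TX_DW_LN0(PORT_B, 2) != 0x162648);
  /* PORT_F, dword 5: 0x162A40 + 4*5, the old _CNL_PORT_TX_DW5_GRP_F. */
  WARN_ON(_CNL_PORT_TX_DW_GRP(PORT_F, 5) != 0x162A54);
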
+#define _ICL_PORT_TX_DW2_GRP_A 0x162688
+#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
+#define _ICL_PORT_TX_DW2_LN0_A 0x162888
+#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
+#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_GRP_A, \
+ _ICL_PORT_TX_DW2_GRP_B)
+#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_LN0_A, \
+ _ICL_PORT_TX_DW2_LN0_B)
+#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
-#define SWING_SEL_LOWER(x) ((x & 0x7) << 11)
+#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
-#define _CNL_PORT_TX_DW4_GRP_AE 0x162350
-#define _CNL_PORT_TX_DW4_GRP_B 0x1623D0
-#define _CNL_PORT_TX_DW4_GRP_C 0x162B50
-#define _CNL_PORT_TX_DW4_GRP_D 0x162BD0
-#define _CNL_PORT_TX_DW4_GRP_F 0x162A50
#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
-#define _CNL_PORT_TX_DW4_LN0_B 0x162650
-#define _CNL_PORT_TX_DW4_LN0_C 0x162C50
-#define _CNL_PORT_TX_DW4_LN0_D 0x162E50
-#define _CNL_PORT_TX_DW4_LN0_F 0x162850
-#define CNL_PORT_TX_DW4_GRP(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW4_GRP_AE, \
- _CNL_PORT_TX_DW4_GRP_B, \
- _CNL_PORT_TX_DW4_GRP_C, \
- _CNL_PORT_TX_DW4_GRP_D, \
- _CNL_PORT_TX_DW4_GRP_AE, \
- _CNL_PORT_TX_DW4_GRP_F)
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO_PORT6_LN(port, ln, \
- _CNL_PORT_TX_DW4_LN0_AE, \
- _CNL_PORT_TX_DW4_LN1_AE, \
- _CNL_PORT_TX_DW4_LN0_B, \
- _CNL_PORT_TX_DW4_LN0_C, \
- _CNL_PORT_TX_DW4_LN0_D, \
- _CNL_PORT_TX_DW4_LN0_AE, \
- _CNL_PORT_TX_DW4_LN0_F)
+#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
+#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
+#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
+ _CNL_PORT_TX_DW4_LN0_AE)))
+#define _ICL_PORT_TX_DW4_GRP_A 0x162690
+#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
+#define _ICL_PORT_TX_DW4_LN0_A 0x162890
+#define _ICL_PORT_TX_DW4_LN1_A 0x162990
+#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
+#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW4_GRP_A, \
+ _ICL_PORT_TX_DW4_GRP_B)
+#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
+ _ICL_PORT_TX_DW4_LN0_A, \
+ _ICL_PORT_TX_DW4_LN0_B) + \
+ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+ _ICL_PORT_TX_DW4_LN0_A)))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -2029,64 +1802,147 @@ enum i915_power_well_id {
#define CURSOR_COEFF(x) ((x) << 0)
#define CURSOR_COEFF_MASK (0x3F << 0)
-#define _CNL_PORT_TX_DW5_GRP_AE 0x162354
-#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4
-#define _CNL_PORT_TX_DW5_GRP_C 0x162B54
-#define _CNL_PORT_TX_DW5_GRP_D 0x162BD4
-#define _CNL_PORT_TX_DW5_GRP_F 0x162A54
-#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
-#define _CNL_PORT_TX_DW5_LN0_B 0x162654
-#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D 0x162E54
-#define _CNL_PORT_TX_DW5_LN0_F 0x162854
-#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW5_GRP_AE, \
- _CNL_PORT_TX_DW5_GRP_B, \
- _CNL_PORT_TX_DW5_GRP_C, \
- _CNL_PORT_TX_DW5_GRP_D, \
- _CNL_PORT_TX_DW5_GRP_AE, \
- _CNL_PORT_TX_DW5_GRP_F)
-#define CNL_PORT_TX_DW5_LN0(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW5_LN0_AE, \
- _CNL_PORT_TX_DW5_LN0_B, \
- _CNL_PORT_TX_DW5_LN0_C, \
- _CNL_PORT_TX_DW5_LN0_D, \
- _CNL_PORT_TX_DW5_LN0_AE, \
- _CNL_PORT_TX_DW5_LN0_F)
+#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
+#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
+#define _ICL_PORT_TX_DW5_GRP_A 0x162694
+#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
+#define _ICL_PORT_TX_DW5_LN0_A 0x162894
+#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
+#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_GRP_A, \
+ _ICL_PORT_TX_DW5_GRP_B)
+#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_LN0_A, \
+ _ICL_PORT_TX_DW5_LN0_B)
#define TX_TRAINING_EN (1 << 31)
+#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
#define SCALING_MODE_SEL(x) ((x) << 18)
#define SCALING_MODE_SEL_MASK (0x7 << 18)
#define RTERM_SELECT(x) ((x) << 3)
#define RTERM_SELECT_MASK (0x7 << 3)
-#define _CNL_PORT_TX_DW7_GRP_AE 0x16235C
-#define _CNL_PORT_TX_DW7_GRP_B 0x1623DC
-#define _CNL_PORT_TX_DW7_GRP_C 0x162B5C
-#define _CNL_PORT_TX_DW7_GRP_D 0x162BDC
-#define _CNL_PORT_TX_DW7_GRP_F 0x162A5C
-#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
-#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
-#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C
-#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
-#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW7_GRP_AE, \
- _CNL_PORT_TX_DW7_GRP_B, \
- _CNL_PORT_TX_DW7_GRP_C, \
- _CNL_PORT_TX_DW7_GRP_D, \
- _CNL_PORT_TX_DW7_GRP_AE, \
- _CNL_PORT_TX_DW7_GRP_F)
-#define CNL_PORT_TX_DW7_LN0(port) _MMIO_PORT6(port, \
- _CNL_PORT_TX_DW7_LN0_AE, \
- _CNL_PORT_TX_DW7_LN0_B, \
- _CNL_PORT_TX_DW7_LN0_C, \
- _CNL_PORT_TX_DW7_LN0_D, \
- _CNL_PORT_TX_DW7_LN0_AE, \
- _CNL_PORT_TX_DW7_LN0_F)
+#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
+#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
+#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
+#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
+#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define CRI_USE_FS32 (1 << 5)
+
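
_ICL_MG_PHY_PORT_LN folds two strides into one expression: TC ports C and up select PHY instances 0x1000 apart, and each lane adds the LN1-LN0 delta of 0x400 (so ICL_PORT_MG_TX1_LINK_PARAMS(PORT_D, 1) lands on 0x16952C, the TX1LN1_PORT2 address above). A per-lane read-modify-write might then look like this sketch, with port, ln and the enclosing function assumed:

  for (ln = 0; ln < 2; ln++) {
          u32 val;

          val = I915_READ(ICL_PORT_MG_TX1_LINK_PARAMS(port, ln));
          val &= ~CRI_USE_FS32;
          I915_WRITE(ICL_PORT_MG_TX1_LINK_PARAMS(port, ln), val);
  }
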
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
+#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
+#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define CRI_CALCINIT (1 << 1)
+
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
+#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
+#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
+#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
+
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1 0x168144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1 0x168544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2 0x169144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2 0x169544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3 0x16A144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3 0x16A544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4 0x16B144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4 0x16B544
+#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \
+ _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \
+ _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
+#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \
+ _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
+#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
+#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
+#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16)
+#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16)
+
/* The spec defines this only for BXT PHY0, but let's assume that this
* would exist for PHY1 too if it had a second channel.
*/
@@ -2473,6 +2329,10 @@ enum i915_power_well_id {
#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
+#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
+#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
+#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
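
The gen11 selector widens the slice field to four bits and narrows subslice to three; it plugs into the same steering sequence gen8 uses: program the selector, do the MMIO access, then restore it. A sketch assuming the gen8 selector register and that forcewake is already held:

  u32 mcr = I915_READ_FW(GEN8_MCR_SELECTOR);

  mcr &= ~(GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK);
  mcr |= GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
  I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

  value = I915_READ_FW(reg); /* steered read */
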
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@@ -2565,12 +2425,17 @@ enum i915_power_well_id {
#define _3D_CHICKEN _MMIO(0x2084)
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
+
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
@@ -2867,6 +2732,19 @@ enum i915_power_well_id {
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
+#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
+#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
+#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
+#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+
+#define GEN11_EU_DISABLE _MMIO(0x9134)
+#define GEN11_EU_DIS_MASK 0xFF
+
+#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
+#define GEN11_GT_S_ENA_MASK 0xFF
+
+#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+
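
These fuse registers let the driver prune unavailable media engines at init. Note the mixed polarity above (slice enable vs subslice/vdbox/vebox disable); a decode sketch with illustrative variable names:

  u32 media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
  u8 vdbox_disabled = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
  u8 vebox_disabled = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
                      GEN11_GT_VEBOX_DISABLE_SHIFT;
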
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3951,6 +3829,9 @@ enum {
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
+#define DUPS1_GATING_DIS (1 << 15)
+#define DUPS2_GATING_DIS (1 << 19)
+#define DUPS3_GATING_DIS (1 << 23)
#define DPF_GATING_DIS (1 << 10)
#define DPF_RAM_GATING_DIS (1 << 9)
#define DPFR_GATING_DIS (1 << 8)
@@ -3964,6 +3845,7 @@ enum {
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define MSCUNIT_CLKGATE_DIS (1 << 10)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
#define GWUNIT_CLKGATE_DIS (1 << 16)
@@ -3971,6 +3853,9 @@ enum {
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS (1 << 20)
+#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
+#define CGPSF_CLKGATE_DIS (1 << 3)
+
/*
* Display engine regs
*/
@@ -4150,7 +4035,20 @@ enum {
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
+/* Bspec claims those aren't shifted but stay at 0x64800 */
+#define EDP_PSR_IMR _MMIO(0x64834)
+#define EDP_PSR_IIR _MMIO(0x64838)
+#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31))
+#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31))
+#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31))
+
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
+#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
+#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
@@ -4180,17 +4078,19 @@ enum {
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60)
+#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
#define EDP_SU_TRACK_ENABLE (1<<30)
+#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
#define EDP_PSR2_TP2_TIME_500 (0<<8)
@@ -4200,8 +4100,32 @@ enum {
#define EDP_PSR2_TP2_TIME_MASK (3<<8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_IDLE_MASK 0xf
#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
+#define EDP_PSR2_IDLE_FRAME_MASK 0xf
+#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+
+#define _PSR_EVENT_TRANS_A 0x60848
+#define _PSR_EVENT_TRANS_B 0x61848
+#define _PSR_EVENT_TRANS_C 0x62848
+#define _PSR_EVENT_TRANS_D 0x63848
+#define _PSR_EVENT_TRANS_EDP 0x6F848
+#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
+#define PSR_EVENT_PSR2_DISABLED (1 << 16)
+#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
+#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14)
+#define PSR_EVENT_GRAPHICS_RESET (1 << 12)
+#define PSR_EVENT_PCH_INTERRUPT (1 << 11)
+#define PSR_EVENT_MEMORY_UP (1 << 10)
+#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
+#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
+#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5)
+#define PSR_EVENT_HDCP_ENABLE (1 << 4)
+#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
+#define PSR_EVENT_VBI_ENABLE (1 << 2)
+#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
+#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define EDP_PSR2_STATUS _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
@@ -5265,8 +5189,6 @@ enum {
#define DP_LINK_TRAIN_OFF (3 << 28)
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
-#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14)
-#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14))
/* CPT Link training mode */
#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
@@ -6009,6 +5931,7 @@ enum {
#define CURSIZE _MMIO(0x700a0) /* 845/865 */
#define _CUR_FBC_CTL_A 0x700a0 /* ivb+ */
#define CUR_FBC_CTL_EN (1 << 31)
+#define _CURASURFLIVE 0x700ac /* g4x+ */
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
@@ -6025,6 +5948,7 @@ enum {
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
#define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
+#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE)
#define CURSOR_A_OFFSET 0x70080
#define CURSOR_B_OFFSET 0x700c0
@@ -6492,9 +6416,9 @@ enum {
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
-#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
-#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17)
@@ -6589,6 +6513,9 @@ enum {
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
+#define SKL_DDB_ENTRY_MASK 0x3FF
+#define ICL_DDB_ENTRY_MASK 0x7FF
+#define DDB_ENTRY_END_SHIFT 16
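
A buffer-config value packs the DDB start block in the low bits and the end block above DDB_ENTRY_END_SHIFT; ICL widens both fields from 10 to 11 bits. Unpacking, as a sketch (the skl_ddb_entry fields and the gen check are assumptions based on the defines above):

  u32 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
  u16 mask = INTEL_GEN(dev_priv) >= 11 ? ICL_DDB_ENTRY_MASK :
                                         SKL_DDB_ENTRY_MASK;

  entry->start = val & mask;
  entry->end = (val >> DDB_ENTRY_END_SHIFT) & mask;
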
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
#define _PLANE_BUF_CFG_2(pipe) \
@@ -6779,6 +6706,8 @@ enum {
#define PS_SCALER_MODE_MASK (3 << 28)
#define PS_SCALER_MODE_DYN (0 << 28)
#define PS_SCALER_MODE_HQ (1 << 28)
+#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
+#define PS_SCALER_MODE_PLANAR (1 << 29)
#define PS_PLANE_SEL_MASK (7 << 25)
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
#define PS_FILTER_MASK (3 << 23)
@@ -6950,6 +6879,7 @@ enum {
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_EDP_PSR_INT_HSW (1<<19)
#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
#define DE_PIPEC_VBLANK_IVB (1<<10)
@@ -7074,6 +7004,7 @@ enum {
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
#define GEN8_DE_MISC_GSE (1 << 27)
+#define GEN8_DE_EDP_PSR (1 << 19)
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
@@ -7117,7 +7048,9 @@ enum {
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
#define GEN11_INTR_DATA_VALID (1 << 31)
-#define GEN11_INTR_ENGINE_MASK (0xffff)
+#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
+#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
+#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
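
Instead of gen8's flat engine mask, a gen11 identity dword names the interrupt source by class and instance alongside the 16 interrupt bits. A handler fragment, treating the bank/regs plumbing and the downstream handler name as assumptions:

  u32 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));

  if (ident & GEN11_INTR_DATA_VALID) {
          const u8 class = GEN11_INTR_ENGINE_CLASS(ident);
          const u8 instance = GEN11_INTR_ENGINE_INSTANCE(ident);
          const u16 iir = GEN11_INTR_ENGINE_INTR(ident);

          gen11_engine_irq_handler(i915, class, instance, iir);
  }
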
@@ -7197,6 +7130,7 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
@@ -7301,18 +7235,22 @@ enum {
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
-#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xB114)
+#define GEN11_I2M_WRITE_DISABLE (1 << 28)
#define GEN7_L3SQCREG4 _MMIO(0xb034)
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
#define GEN8_L3SQCREG4 _MMIO(0xb118)
-#define GEN8_LQSC_RO_PERF_DIS (1<<27)
-#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
+#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
+#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
+#define ICL_HDC_MODE _MMIO(0xE5F4)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@@ -8327,8 +8265,30 @@ enum {
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
-#define GEN8_GARBCNTL _MMIO(0xB004)
-#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
+#define GEN8_GARBCNTL _MMIO(0xB004)
+#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
+#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
+#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
+#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
+
+#define GEN11_GLBLINVL _MMIO(0xB404)
+#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
+#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+
+#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
+#define DFR_DISABLE (1 << 9)
+
+#define GEN11_GACB_PERF_CTRL _MMIO(0x4B80)
+#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
+#define GEN11_HASH_CTRL_BIT0 (1 << 0)
+#define GEN11_HASH_CTRL_BIT4 (1 << 12)
+
+#define GEN11_LSN_UNSLCVC _MMIO(0xB43C)
+#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
+#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
+
+#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080)
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8837,6 +8797,12 @@ enum skl_power_gate {
#define PORT_CLK_SEL_NONE (7<<29)
#define PORT_CLK_SEL_MASK (7<<29)
+/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
+#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
+#define DDI_CLK_SEL_NONE (0x0 << 28)
+#define DDI_CLK_SEL_MG (0x8 << 28)
+#define DDI_CLK_SEL_MASK (0xF << 28)
+
/* Transcoder clock selection */
#define _TRANS_CLK_SEL_A 0x46140
#define _TRANS_CLK_SEL_B 0x46144
@@ -8967,6 +8933,7 @@ enum skl_power_gate {
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
+#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
(port)+10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
@@ -8983,10 +8950,141 @@ enum skl_power_gate {
#define PLL_POWER_STATE (1 << 26)
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define _MG_PLL1_ENABLE 0x46030
+#define _MG_PLL2_ENABLE 0x46034
+#define _MG_PLL3_ENABLE 0x46038
+#define _MG_PLL4_ENABLE 0x4603C
+/* Bits are the same as DPLL0_ENABLE */
+#define MG_PLL_ENABLE(port) _MMIO_PORT((port) - PORT_C, _MG_PLL1_ENABLE, \
+ _MG_PLL2_ENABLE)
+
+#define _MG_REFCLKIN_CTL_PORT1 0x16892C
+#define _MG_REFCLKIN_CTL_PORT2 0x16992C
+#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
+#define _MG_REFCLKIN_CTL_PORT4 0x16B92C
+#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
+#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
+ _MG_REFCLKIN_CTL_PORT1, \
+ _MG_REFCLKIN_CTL_PORT2)
+
+#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
+ _MG_CLKTOP2_CORECLKCTL1_PORT1, \
+ _MG_CLKTOP2_CORECLKCTL1_PORT2)
+
+#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
+ _MG_CLKTOP2_HSCLKCTL_PORT1, \
+ _MG_CLKTOP2_HSCLKCTL_PORT2)
+
+#define _MG_PLL_DIV0_PORT1 0x168A00
+#define _MG_PLL_DIV0_PORT2 0x169A00
+#define _MG_PLL_DIV0_PORT3 0x16AA00
+#define _MG_PLL_DIV0_PORT4 0x16BA00
+#define MG_PLL_DIV0_FRACNEN_H (1 << 30)
+#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8)
+#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
+ _MG_PLL_DIV0_PORT2)
+
+#define _MG_PLL_DIV1_PORT1 0x168A04
+#define _MG_PLL_DIV1_PORT2 0x169A04
+#define _MG_PLL_DIV1_PORT3 0x16AA04
+#define _MG_PLL_DIV1_PORT4 0x16BA04
+#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16)
+#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12)
+#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4)
+#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0)
+#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
+ _MG_PLL_DIV1_PORT2)
+
+#define _MG_PLL_LF_PORT1 0x168A08
+#define _MG_PLL_LF_PORT2 0x169A08
+#define _MG_PLL_LF_PORT3 0x16AA08
+#define _MG_PLL_LF_PORT4 0x16BA08
+#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24)
+#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20)
+#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20)
+#define MG_PLL_LF_GAINCTRL(x) ((x) << 16)
+#define MG_PLL_LF_INT_COEFF(x) ((x) << 8)
+#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0)
+#define MG_PLL_LF(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_LF_PORT1, \
+ _MG_PLL_LF_PORT2)
+
+#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C
+#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C
+#define _MG_PLL_FRAC_LOCK_PORT3 0x16AA0C
+#define _MG_PLL_FRAC_LOCK_PORT4 0x16BA0C
+#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18)
+#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16)
+#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11)
+#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10)
+#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8)
+#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0)
+#define MG_PLL_FRAC_LOCK(port) _MMIO_PORT((port) - PORT_C, \
+ _MG_PLL_FRAC_LOCK_PORT1, \
+ _MG_PLL_FRAC_LOCK_PORT2)
+
+#define _MG_PLL_SSC_PORT1 0x168A10
+#define _MG_PLL_SSC_PORT2 0x169A10
+#define _MG_PLL_SSC_PORT3 0x16AA10
+#define _MG_PLL_SSC_PORT4 0x16BA10
+#define MG_PLL_SSC_EN (1 << 28)
+#define MG_PLL_SSC_TYPE(x) ((x) << 26)
+#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16)
+#define MG_PLL_SSC_STEPNUM(x) ((x) << 10)
+#define MG_PLL_SSC_FLLEN (1 << 9)
+#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0)
+#define MG_PLL_SSC(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_SSC_PORT1, \
+ _MG_PLL_SSC_PORT2)
+
+#define _MG_PLL_BIAS_PORT1 0x168A14
+#define _MG_PLL_BIAS_PORT2 0x169A14
+#define _MG_PLL_BIAS_PORT3 0x16AA14
+#define _MG_PLL_BIAS_PORT4 0x16BA14
+#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30)
+#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24)
+#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16)
+#define MG_PLL_BIAS_BIASCAL_EN (1 << 15)
+#define MG_PLL_BIAS_CTRIM(x) ((x) << 8)
+#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5)
+#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
+#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
+ _MG_PLL_BIAS_PORT2)
+
+#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT3 0x16AA18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT4 0x16BA18
+#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27)
+#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17)
+#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16)
+#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2)
+#define MG_PLL_TDC_TDCSEL(x) ((x) << 0)
+#define MG_PLL_TDC_COLDST_BIAS(port) _MMIO_PORT((port) - PORT_C, \
+ _MG_PLL_TDC_COLDST_BIAS_PORT1, \
+ _MG_PLL_TDC_COLDST_BIAS_PORT2)
+
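
Programming one of these MG PLLs is a matter of OR-ing the field macros into whole-register values; a fragment in that spirit, where every divider number is a placeholder rather than a computed setting:

  I915_WRITE(MG_PLL_DIV0(port), MG_PLL_DIV0_FRACNEN_H |
                                MG_PLL_DIV0_FBDIV_FRAC(0x4000) |
                                MG_PLL_DIV0_FBDIV_INT(124));
  I915_WRITE(MG_PLL_DIV1(port), MG_PLL_DIV1_IREF_NDIVRATIO(1) |
                                MG_PLL_DIV1_DITHER_DIV_2 |
                                MG_PLL_DIV1_NDIVRATIO(1) |
                                MG_PLL_DIV1_FBPREDIV(2));
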
#define _CNL_DPLL0_CFGCR0 0x6C000
#define _CNL_DPLL1_CFGCR0 0x6C080
#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
+#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
@@ -9020,8 +9118,19 @@ enum skl_power_gate {
#define DPLL_CFGCR1_PDIV_5 (4 << 2)
#define DPLL_CFGCR1_PDIV_7 (8 << 2)
#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
+#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
+#define _ICL_DPLL0_CFGCR0 0x164000
+#define _ICL_DPLL1_CFGCR0 0x164080
+#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+ _ICL_DPLL1_CFGCR0)
+
+#define _ICL_DPLL0_CFGCR1 0x164004
+#define _ICL_DPLL1_CFGCR1 0x164084
+#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+ _ICL_DPLL1_CFGCR1)
+
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -9793,6 +9902,13 @@ enum skl_power_gate {
#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
+/* Media decoder 2 MOCS registers */
+#define GEN11_MFX2_MOCS(i) _MMIO(0x10000 + (i) * 4)
+
+#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
+#define PMFLUSHDONE_LNICRSDROP (1 << 20)
+#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
+#define PMFLUSHDONE_LNEBLK (1 << 22)
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 282f57630cc1..8928894dd9c7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -49,7 +49,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->timeline->common->name;
+ return to_request(fence)->timeline->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -59,11 +59,7 @@ static bool i915_fence_signaled(struct dma_fence *fence)
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
- if (i915_fence_signaled(fence))
- return false;
-
- intel_engine_enable_signaling(to_request(fence), true);
- return !i915_fence_signaled(fence);
+ return intel_engine_enable_signaling(to_request(fence), true);
}
static signed long i915_fence_wait(struct dma_fence *fence,
@@ -129,22 +125,22 @@ i915_dependency_free(struct drm_i915_private *i915,
}
static void
-__i915_priotree_add_dependency(struct i915_priotree *pt,
- struct i915_priotree *signal,
- struct i915_dependency *dep,
- unsigned long flags)
+__i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
{
INIT_LIST_HEAD(&dep->dfs_link);
list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &pt->signalers_list);
+ list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->flags = flags;
}
static int
-i915_priotree_add_dependency(struct drm_i915_private *i915,
- struct i915_priotree *pt,
- struct i915_priotree *signal)
+i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal)
{
struct i915_dependency *dep;
@@ -152,16 +148,18 @@ i915_priotree_add_dependency(struct drm_i915_private *i915,
if (!dep)
return -ENOMEM;
- __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ __i915_sched_node_add_dependency(node, signal, dep,
+ I915_DEPENDENCY_ALLOC);
return 0;
}
static void
-i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node)
{
- struct i915_dependency *dep, *next;
+ struct i915_dependency *dep, *tmp;
- GEM_BUG_ON(!list_empty(&pt->link));
+ GEM_BUG_ON(!list_empty(&node->link));
/*
* Everyone we depended upon (the fences we wait to be signaled)
@@ -169,8 +167,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
* However, retirement is run independently on each timeline and
* so we may be called out-of-order.
*/
- list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
- GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
@@ -179,8 +177,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
}
/* Remove ourselves from everyone who depends upon us */
- list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
- GEM_BUG_ON(dep->signaler != pt);
+ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->signal_link);
@@ -190,17 +188,18 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
}
static void
-i915_priotree_init(struct i915_priotree *pt)
+i915_sched_node_init(struct i915_sched_node *node)
{
- INIT_LIST_HEAD(&pt->signalers_list);
- INIT_LIST_HEAD(&pt->waiters_list);
- INIT_LIST_HEAD(&pt->link);
- pt->priority = I915_PRIORITY_INVALID;
+ INIT_LIST_HEAD(&node->signalers_list);
+ INIT_LIST_HEAD(&node->waiters_list);
+ INIT_LIST_HEAD(&node->link);
+ node->attr.priority = I915_PRIORITY_INVALID;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct intel_engine_cs *engine;
+ struct i915_timeline *timeline;
enum intel_engine_id id;
int ret;
@@ -211,30 +210,37 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
if (ret)
return ret;
+ GEM_BUG_ON(i915->gt.active_requests);
+
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
- struct i915_gem_timeline *timeline;
- struct intel_timeline *tl = engine->timeline;
+ GEM_TRACE("%s seqno %d (current %d) -> %d\n",
+ engine->name,
+ engine->timeline.seqno,
+ intel_engine_get_seqno(engine),
+ seqno);
- if (!i915_seqno_passed(seqno, tl->seqno)) {
+ if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
+ intel_engine_init_hangcheck(engine);
GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
}
/* Check we are idle before we fiddle with hw state! */
GEM_BUG_ON(!intel_engine_is_idle(engine));
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
- tl->seqno = seqno;
-
- list_for_each_entry(timeline, &i915->gt.timelines, link)
- memset(timeline->engine[id].global_sync, 0,
- sizeof(timeline->engine[id].global_sync));
+ engine->timeline.seqno = seqno;
}
+ list_for_each_entry(timeline, &i915->gt.timelines, link)
+ memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
+
+ i915->gt.request_serial = seqno;
+
return 0;
}
@@ -251,83 +257,37 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
return reset_all_global_seqno(i915, seqno - 1);
}
-static void mark_busy(struct drm_i915_private *i915)
+static int reserve_gt(struct drm_i915_private *i915)
{
- if (i915->gt.awake)
- return;
-
- GEM_BUG_ON(!i915->gt.active_requests);
-
- intel_runtime_pm_get_noresume(i915);
+ int ret;
/*
- * It seems that the DMC likes to transition between the DC states a lot
- * when there are no connected displays (no active power domains) during
- * command submission.
- *
- * This activity has negative impact on the performance of the chip with
- * huge latencies observed in the interrupt handler and elsewhere.
+ * Reservation is fine until we need to wrap around
*
- * Work around it by grabbing a GT IRQ power domain whilst there is any
- * GT activity, preventing any DC state transitions.
+ * By incrementing the serial for every request, we know that no
+ * individual engine may exceed that serial (as each is reset to 0
+ * on any wrap). This protects even the most pessimistic of migrations
+ * of every request from all engines onto just one.
*/
- intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
-
- i915->gt.awake = true;
- if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
- i915->gt.epoch = 1;
-
- intel_enable_gt_powersave(i915);
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
- i915_pmu_gt_unparked(i915);
-
- intel_engines_unpark(i915);
-
- i915_queue_hangcheck(i915);
-
- queue_delayed_work(i915->wq,
- &i915->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static int reserve_engine(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
- u32 active = ++engine->timeline->inflight_seqnos;
- u32 seqno = engine->timeline->seqno;
- int ret;
-
- /* Reservation is fine until we need to wrap around */
- if (unlikely(add_overflows(seqno, active))) {
+ while (unlikely(++i915->gt.request_serial == 0)) {
ret = reset_all_global_seqno(i915, 0);
if (ret) {
- engine->timeline->inflight_seqnos--;
+ i915->gt.request_serial--;
return ret;
}
}
if (!i915->gt.active_requests++)
- mark_busy(i915);
+ i915_gem_unpark(i915);
return 0;
}
-static void unreserve_engine(struct intel_engine_cs *engine)
+static void unreserve_gt(struct drm_i915_private *i915)
{
- struct drm_i915_private *i915 = engine->i915;
-
- if (!--i915->gt.active_requests) {
- /* Cancel the mark_busy() from our reserve_engine() */
- GEM_BUG_ON(!i915->gt.awake);
- mod_delayed_work(i915->wq,
- &i915->gt.idle_work,
- msecs_to_jiffies(100));
- }
-
- GEM_BUG_ON(!engine->timeline->inflight_seqnos);
- engine->timeline->inflight_seqnos--;
+ GEM_BUG_ON(!i915->gt.active_requests);
+ if (!--i915->gt.active_requests)
+ i915_gem_park(i915);
}
void i915_gem_retire_noop(struct i915_gem_active *active,
@@ -338,6 +298,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
static void advance_ring(struct i915_request *request)
{
+ struct intel_ring *ring = request->ring;
unsigned int tail;
/*
@@ -349,7 +310,8 @@ static void advance_ring(struct i915_request *request)
* Note this requires that we are always called in request
* completion order.
*/
- if (list_is_last(&request->ring_link, &request->ring->request_list)) {
+ GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
+ if (list_is_last(&request->ring_link, &ring->request_list)) {
/*
* We may race here with execlists resubmitting this request
* as we retire it. The resubmission will move the ring->tail
@@ -358,13 +320,14 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
- tail = READ_ONCE(request->ring->tail);
+ tail = READ_ONCE(request->tail);
+ list_del(&ring->active_link);
} else {
tail = request->postfix;
}
- list_del(&request->ring_link);
+ list_del_init(&request->ring_link);
- request->ring->head = tail;
+ ring->head = tail;
}
static void free_capture_list(struct i915_request *request)
@@ -380,25 +343,84 @@ static void free_capture_list(struct i915_request *request)
}
}
+static void __retire_engine_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+ __func__, engine->name,
+ rq->fence.context, rq->fence.seqno,
+ rq->global_seqno,
+ intel_engine_get_seqno(engine));
+
+ GEM_BUG_ON(!i915_request_completed(rq));
+
+ local_irq_disable();
+
+ spin_lock(&engine->timeline.lock);
+ GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
+ list_del_init(&rq->link);
+ spin_unlock(&engine->timeline.lock);
+
+ spin_lock(&rq->lock);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ dma_fence_signal_locked(&rq->fence);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ intel_engine_cancel_signaling(rq);
+ if (rq->waitboost) {
+ GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+ atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ }
+ spin_unlock(&rq->lock);
+
+ local_irq_enable();
+
+ /*
+ * The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. However, since we
+ * cannot take the required locks at i915_request_submit() we
+ * defer the unpinning of the active context to now, retirement of
+ * the subsequent request.
+ */
+ if (engine->last_retired_context)
+ intel_context_unpin(engine->last_retired_context, engine);
+ engine->last_retired_context = rq->ctx;
+}
+
+static void __retire_engine_upto(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ struct i915_request *tmp;
+
+ if (list_empty(&rq->link))
+ return;
+
+ do {
+ tmp = list_first_entry(&engine->timeline.requests,
+ typeof(*tmp), link);
+
+ GEM_BUG_ON(tmp->engine != engine);
+ __retire_engine_request(engine, tmp);
+ } while (tmp != rq);
+}
+
static void i915_request_retire(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
+ GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ request->engine->name,
+ request->fence.context, request->fence.seqno,
+ request->global_seqno,
+ intel_engine_get_seqno(request->engine));
+
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
GEM_BUG_ON(!i915_request_completed(request));
- GEM_BUG_ON(!request->i915->gt.active_requests);
trace_i915_request_retire(request);
- spin_lock_irq(&engine->timeline->lock);
- list_del_init(&request->link);
- spin_unlock_irq(&engine->timeline->lock);
-
- unreserve_engine(request->engine);
advance_ring(request);
-
free_capture_list(request);
/*
@@ -434,73 +456,74 @@ static void i915_request_retire(struct i915_request *request)
/* Retirement decays the ban score as it is a sign of ctx progress */
atomic_dec_if_positive(&request->ctx->ban_score);
+ intel_context_unpin(request->ctx, request->engine);
- /*
- * The backing object for the context is done after switching to the
- * *next* context. Therefore we cannot retire the previous context until
- * the next context has already started running. However, since we
- * cannot take the required locks at i915_request_submit() we
- * defer the unpinning of the active context to now, retirement of
- * the subsequent request.
- */
- if (engine->last_retired_context)
- engine->context_unpin(engine, engine->last_retired_context);
- engine->last_retired_context = request->ctx;
+ __retire_engine_upto(request->engine, request);
- spin_lock_irq(&request->lock);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
- dma_fence_signal_locked(&request->fence);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_cancel_signaling(request);
- if (request->waitboost) {
- GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
- atomic_dec(&request->i915->gt_pm.rps.num_waiters);
- }
- spin_unlock_irq(&request->lock);
+ unreserve_gt(request->i915);
- i915_priotree_fini(request->i915, &request->priotree);
+ i915_sched_node_fini(request->i915, &request->sched);
i915_request_put(request);
}
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
+ struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
+ GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ rq->engine->name,
+ rq->fence.context, rq->fence.seqno,
+ rq->global_seqno,
+ intel_engine_get_seqno(rq->engine));
+
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&rq->link))
+ if (list_empty(&rq->ring_link))
return;
do {
- tmp = list_first_entry(&engine->timeline->requests,
- typeof(*tmp), link);
+ tmp = list_first_entry(&ring->request_list,
+ typeof(*tmp), ring_link);
i915_request_retire(tmp);
} while (tmp != rq);
}
-static u32 timeline_get_seqno(struct intel_timeline *tl)
+static u32 timeline_get_seqno(struct i915_timeline *tl)
{
return ++tl->seqno;
}
+static void move_to_timeline(struct i915_request *request,
+ struct i915_timeline *timeline)
+{
+ GEM_BUG_ON(request->timeline == &request->engine->timeline);
+ lockdep_assert_held(&request->engine->timeline.lock);
+
+ spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ list_move_tail(&request->link, &timeline->requests);
+ spin_unlock(&request->timeline->lock);
+}
+
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_timeline *timeline;
u32 seqno;
+ GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
+ engine->name,
+ request->fence.context, request->fence.seqno,
+ engine->timeline.seqno + 1,
+ intel_engine_get_seqno(engine));
+
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline->lock);
+ lockdep_assert_held(&engine->timeline.lock);
- /* Transfer from per-context onto the global per-engine timeline */
- timeline = engine->timeline;
- GEM_BUG_ON(timeline == request->timeline);
GEM_BUG_ON(request->global_seqno);
- seqno = timeline_get_seqno(timeline);
+ seqno = timeline_get_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
@@ -514,9 +537,8 @@ void __i915_request_submit(struct i915_request *request)
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
- spin_lock(&request->timeline->lock);
- list_move_tail(&request->link, &timeline->requests);
- spin_unlock(&request->timeline->lock);
+ /* Transfer from per-context onto the global per-engine timeline */
+ move_to_timeline(request, &engine->timeline);
trace_i915_request_execute(request);
@@ -529,30 +551,35 @@ void i915_request_submit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_submit(request);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_timeline *timeline;
+
+ GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
+ engine->name,
+ request->fence.context, request->fence.seqno,
+ request->global_seqno,
+ intel_engine_get_seqno(engine));
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline->lock);
+ lockdep_assert_held(&engine->timeline.lock);
/*
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+ GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
request->global_seqno));
- engine->timeline->seqno--;
+ engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -562,12 +589,7 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
- timeline = request->timeline;
- GEM_BUG_ON(timeline == engine->timeline);
-
- spin_lock(&timeline->lock);
- list_move(&request->link, &timeline->requests);
- spin_unlock(&timeline->lock);
+ move_to_timeline(request, request->timeline);
/*
* We don't need to wake_up any waiters on request->execute, they
@@ -584,11 +606,11 @@ void i915_request_unsubmit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_unsubmit(request);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static int __i915_sw_fence_call
@@ -659,12 +681,12 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ring = engine->context_pin(engine, ctx);
+ ring = intel_context_pin(ctx, engine);
if (IS_ERR(ring))
return ERR_CAST(ring);
GEM_BUG_ON(!ring);
- ret = reserve_engine(engine);
+ ret = reserve_gt(i915);
if (ret)
goto err_unpin;
@@ -672,10 +694,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (ret)
goto err_unreserve;
- /* Move the oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry_or_null(&engine->timeline->requests,
- typeof(*rq), link);
- if (rq && i915_request_completed(rq))
+ /* Move our oldest request to the slab-cache (if not in use!) */
+ rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+ i915_request_completed(rq))
i915_request_retire(rq);
/*
@@ -735,8 +757,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
- rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
- GEM_BUG_ON(rq->timeline == engine->timeline);
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->i915 = i915;
+ rq->engine = engine;
+ rq->ctx = ctx;
+ rq->ring = ring;
+ rq->timeline = ring->timeline;
+ GEM_BUG_ON(rq->timeline == &engine->timeline);
spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence,
@@ -749,13 +776,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
init_waitqueue_head(&rq->execute);
- i915_priotree_init(&rq->priotree);
-
- INIT_LIST_HEAD(&rq->active_list);
- rq->i915 = i915;
- rq->engine = engine;
- rq->ctx = ctx;
- rq->ring = ring;
+ i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
rq->global_seqno = 0;
@@ -792,6 +813,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (ret)
goto err_unwind;
+ /* Keep a second pin for the dual retirement along the engine and ring timelines */
+ __intel_context_pin(rq->ctx, engine);
+
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
return rq;
@@ -801,14 +825,14 @@ err_unwind:
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&rq->active_list));
- GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
- GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
+ GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
+ GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
kmem_cache_free(i915->requests, rq);
err_unreserve:
- unreserve_engine(engine);
+ unreserve_gt(i915);
err_unpin:
- engine->context_unpin(engine, ctx);
+ intel_context_unpin(ctx, engine);
return ERR_PTR(ret);
}
@@ -824,9 +848,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
- ret = i915_priotree_add_dependency(to->i915,
- &to->priotree,
- &from->priotree);
+ ret = i915_sched_node_add_dependency(to->i915,
+ &to->sched,
+ &from->sched);
if (ret < 0)
return ret;
}
@@ -904,7 +928,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Squash repeated waits to the same timelines */
if (fence->context != rq->i915->mm.unordered_timeline &&
- intel_timeline_sync_is_later(rq->timeline, fence))
+ i915_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
@@ -918,7 +942,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Record the latest fence used against each timeline */
if (fence->context != rq->i915->mm.unordered_timeline)
- intel_timeline_sync_set(rq->timeline, fence);
+ i915_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
@@ -995,11 +1019,14 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
- struct intel_timeline *timeline = request->timeline;
+ struct i915_timeline *timeline = request->timeline;
struct i915_request *prev;
u32 *cs;
int err;
+ GEM_TRACE("%s fence %llx:%d\n",
+ engine->name, request->fence.context, request->fence.seqno);
+
lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_request_add(request);
@@ -1054,10 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
if (engine->schedule)
- __i915_priotree_add_dependency(&request->priotree,
- &prev->priotree,
- &request->dep,
- 0);
+ __i915_sched_node_add_dependency(&request->sched,
+ &prev->sched,
+ &request->dep,
+ 0);
}
spin_lock_irq(&timeline->lock);
@@ -1068,6 +1095,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
+ if (list_is_first(&request->ring_link, &ring->request_list))
+ list_add(&ring->active_link, &request->i915->gt.active_rings);
request->emitted_jiffies = jiffies;
/*
@@ -1081,12 +1110,11 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- rcu_read_lock();
+ local_bh_disable();
+ rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(request, request->ctx->priority);
+ engine->schedule(request, &request->ctx->sched);
rcu_read_unlock();
-
- local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1206,11 +1234,13 @@ static bool __i915_spin_request(const struct i915_request *rq,
static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
- if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
+ struct i915_gpu_error *error = &request->i915->gpu_error;
+
+ if (likely(!i915_reset_handoff(error)))
return false;
__set_current_state(TASK_RUNNING);
- i915_reset(request->i915, 0);
+ i915_reset(request->i915, error->stalled_mask, error->reason);
return true;
}
@@ -1373,38 +1403,30 @@ complete:
return timeout;
}
-static void engine_retire_requests(struct intel_engine_cs *engine)
+static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *request, *next;
- u32 seqno = intel_engine_get_seqno(engine);
- LIST_HEAD(retire);
- spin_lock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next,
- &engine->timeline->requests, link) {
- if (!i915_seqno_passed(seqno, request->global_seqno))
+ &ring->request_list, ring_link) {
+ if (!i915_request_completed(request))
break;
- list_move_tail(&request->link, &retire);
- }
- spin_unlock_irq(&engine->timeline->lock);
-
- list_for_each_entry_safe(request, next, &retire, link)
i915_request_retire(request);
+ }
}
void i915_retire_requests(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct intel_ring *ring, *tmp;
lockdep_assert_held(&i915->drm.struct_mutex);
if (!i915->gt.active_requests)
return;
- for_each_engine(engine, i915, id)
- engine_retire_requests(engine);
+ list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+ ring_retire_requests(ring);
}
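For orientation, a condensed sketch of how a ring joins and leaves gt.active_rings across this patch. The queue side is lifted from the __i915_request_add() hunk above; the retire side is an assumption about advance_ring(), which this diff does not show:

	/* Queue side (__i915_request_add): the first request activates the ring. */
	list_add_tail(&rq->ring_link, &ring->request_list);
	if (list_is_first(&rq->ring_link, &ring->request_list))
		list_add(&ring->active_link, &rq->i915->gt.active_rings);

	/* Retire side (assumed, in advance_ring()): the last request idles it. */
	if (list_is_last(&rq->ring_link, &ring->request_list))
		list_del(&ring->active_link);
	list_del(&rq->ring_link);

With every active ring kept on gt.active_rings, i915_retire_requests() above no longer has to walk idle engines.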
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6eb82eeb91..eddbd4245cb3 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,13 +28,16 @@
#include <linux/dma-fence.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
#include "i915_sw_fence.h"
#include <uapi/drm/i915_drm.h>
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
+struct i915_timeline;
struct intel_wait {
struct rb_node node;
@@ -48,44 +51,6 @@ struct intel_signal_node {
struct list_head link;
};
-struct i915_dependency {
- struct i915_priotree *signaler;
- struct list_head signal_link;
- struct list_head wait_link;
- struct list_head dfs_link;
- unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- */
-struct i915_priotree {
- struct list_head signalers_list; /* those before us, we depend upon */
- struct list_head waiters_list; /* those after us, they depend upon us */
- struct list_head link;
- int priority;
-};
-
-enum {
- I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
- I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
- I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- I915_PRIORITY_INVALID = INT_MIN
-};
-
struct i915_capture_list {
struct i915_capture_list *next;
struct i915_vma *vma;
@@ -131,7 +96,7 @@ struct i915_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
- struct intel_timeline *timeline;
+ struct i915_timeline *timeline;
struct intel_signal_node signaling;
/*
@@ -154,7 +119,7 @@ struct i915_request {
* to retirement), i.e. bidirectional dependency information for the
* request not tied to individual fences.
*/
- struct i915_priotree priotree;
+ struct i915_sched_node sched;
struct i915_dependency dep;
/**
@@ -343,10 +308,10 @@ static inline bool i915_request_started(const struct i915_request *rq)
seqno - 1);
}
-static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
const struct i915_request *rq =
- container_of(pt, const struct i915_request, priotree);
+ container_of(node, const struct i915_request, sched);
return i915_request_completed(rq);
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
new file mode 100644
index 000000000000..70a42220358d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_H_
+#define _I915_SCHEDULER_H_
+
+#include <linux/bitops.h>
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
+};
+
+struct i915_sched_attr {
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct list_head link;
+ struct i915_sched_attr attr;
+};
+
+struct i915_dependency {
+ struct i915_sched_node *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_H_ */
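The two structures are threaded together by i915_dependency edges. As a rough illustration only (the real helpers live elsewhere in the driver and take additional locking and allocation flags; the function name here is hypothetical), linking one edge might look like:

	/* Hypothetical sketch: record that @node must wait upon @signaler. */
	static void sketch_add_dependency(struct i915_sched_node *node,
					  struct i915_sched_node *signaler,
					  struct i915_dependency *dep)
	{
		dep->signaler = signaler;
		dep->flags = 0;

		/* @signaler comes before @node ... */
		list_add(&dep->signal_link, &node->signalers_list);
		/* ... and @node is among those waiting upon @signaler. */
		list_add(&dep->wait_link, &signaler->waiters_list);
	}

Walking node->signalers_list then visits everything a request depends upon, while signaler->waiters_list fans out to everything that must be revisited when its priority changes.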
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
new file mode 100644
index 000000000000..4667cc08c416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "i915_timeline.h"
+#include "i915_syncmap.h"
+
+void i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *timeline,
+ const char *name)
+{
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ /*
+ * Ideally we want a set of engines on a single leaf as we expect
+ * to mostly be tracking synchronisation between engines. It is not
+ * a huge problem if this is not the case, but we may want to mitigate
+ * any page-crossing penalties if they become an issue.
+ */
+ BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+
+ timeline->name = name;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+
+ timeline->fence_context = dma_fence_context_alloc(1);
+
+ spin_lock_init(&timeline->lock);
+
+ init_request_active(&timeline->last_request, NULL);
+ INIT_LIST_HEAD(&timeline->requests);
+
+ i915_syncmap_init(&timeline->sync);
+}
+
+/**
+ * i915_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled, and therefore we will not even look it up in the
+ * sync point map.
+ */
+void i915_timelines_park(struct drm_i915_private *i915)
+{
+ struct i915_timeline *timeline;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ /*
+ * All known fences are completed so we can scrap
+ * the current sync point tracking and start afresh,
+ * any attempt to wait upon a previous sync point
+ * will be skipped as the fence was signaled.
+ */
+ i915_syncmap_free(&timeline->sync);
+ }
+}
+
+void i915_timeline_fini(struct i915_timeline *timeline)
+{
+ GEM_BUG_ON(!list_empty(&timeline->requests));
+
+ i915_syncmap_free(&timeline->sync);
+
+ list_del(&timeline->link);
+}
+
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name)
+{
+ struct i915_timeline *timeline;
+
+ timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+ if (!timeline)
+ return ERR_PTR(-ENOMEM);
+
+ i915_timeline_init(i915, timeline, name);
+ kref_init(&timeline->kref);
+
+ return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref)
+{
+ struct i915_timeline *timeline =
+ container_of(kref, typeof(*timeline), kref);
+
+ i915_timeline_fini(timeline);
+ kfree(timeline);
+}
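Taken together, these helpers give each timeline a conventional kref lifecycle. A hedged usage sketch (the caller is illustrative; note i915_timeline_init() asserts struct_mutex is held):

	struct i915_timeline *tl;

	tl = i915_timeline_create(i915, "example");	/* kref == 1 */
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	tl = i915_timeline_get(tl);	/* take a second reference */
	i915_timeline_put(tl);		/* ... and drop it again */

	i915_timeline_put(tl);		/* final put invokes __i915_timeline_free() */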
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_timeline.c"
+#include "selftests/i915_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 33e01bf6aa36..dc2a4632faa7 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -22,27 +22,20 @@
*
*/
-#ifndef I915_GEM_TIMELINE_H
-#define I915_GEM_TIMELINE_H
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
#include <linux/list.h>
+#include <linux/kref.h>
#include "i915_request.h"
#include "i915_syncmap.h"
#include "i915_utils.h"
-struct i915_gem_timeline;
-
-struct intel_timeline {
+struct i915_timeline {
u64 fence_context;
u32 seqno;
- /**
- * Count of outstanding requests, from the time they are constructed
- * to the moment they are retired. Loosely coupled to hardware.
- */
- u32 inflight_seqnos;
-
spinlock_t lock;
/**
@@ -77,47 +70,57 @@ struct intel_timeline {
*/
u32 global_sync[I915_NUM_ENGINES];
- struct i915_gem_timeline *common;
-};
-
-struct i915_gem_timeline {
struct list_head link;
-
- struct drm_i915_private *i915;
const char *name;
- struct intel_timeline engine[I915_NUM_ENGINES];
+ struct kref kref;
};
-int i915_gem_timeline_init(struct drm_i915_private *i915,
- struct i915_gem_timeline *tl,
- const char *name);
-int i915_gem_timeline_init__global(struct drm_i915_private *i915);
-void i915_gem_timelines_park(struct drm_i915_private *i915);
-void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+void i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *tl,
+ const char *name);
+void i915_timeline_fini(struct i915_timeline *tl);
+
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name);
-static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
- u64 context, u32 seqno)
+static inline struct i915_timeline *
+i915_timeline_get(struct i915_timeline *timeline)
+{
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref);
+static inline void i915_timeline_put(struct i915_timeline *timeline)
+{
+ kref_put(&timeline->kref, __i915_timeline_free);
+}
+
+static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
+ u64 context, u32 seqno)
{
return i915_syncmap_set(&tl->sync, context, seqno);
}
-static inline int intel_timeline_sync_set(struct intel_timeline *tl,
- const struct dma_fence *fence)
+static inline int i915_timeline_sync_set(struct i915_timeline *tl,
+ const struct dma_fence *fence)
{
- return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+ return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
}
-static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
- u64 context, u32 seqno)
+static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
+ u64 context, u32 seqno)
{
return i915_syncmap_is_later(&tl->sync, context, seqno);
}
-static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
- const struct dma_fence *fence)
+static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
+ const struct dma_fence *fence)
{
- return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+ return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
}
+void i915_timelines_park(struct drm_i915_private *i915);
+
#endif
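The sync_set/sync_is_later pair exists to squash redundant waits, exactly as the i915_request_await_dma_fence() hunk earlier uses them. A minimal sketch of the idiom (wait_on() is a hypothetical stand-in for whatever fence wait the caller performs):

	/* Skip the fence if this timeline already waited on something later. */
	if (i915_timeline_sync_is_later(tl, fence))
		return 0;

	err = wait_on(fence);		/* hypothetical wait */
	if (err)
		return err;

	/* Remember the latest fence seen for this fence context. */
	return i915_timeline_sync_set(tl, fence);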
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 408827bf5d96..8cc3a256f29d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -679,45 +679,68 @@ DEFINE_EVENT(i915_request, i915_request_execute,
TP_ARGS(rq)
);
-DECLARE_EVENT_CLASS(i915_request_hw,
- TP_PROTO(struct i915_request *rq, unsigned int port),
- TP_ARGS(rq, port),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
- __field(u32, seqno)
- __field(u32, global_seqno)
- __field(u32, port)
- ),
-
- TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
- __entry->ctx = rq->fence.context;
- __entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
- __entry->port = port;
- ),
-
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
- __entry->dev, __entry->hw_id, __entry->ring,
- __entry->ctx, __entry->seqno,
- __entry->global_seqno, __entry->port)
-);
+TRACE_EVENT(i915_request_in,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, hw_id)
+ __field(u32, ring)
+ __field(u32, ctx)
+ __field(u32, seqno)
+ __field(u32, global_seqno)
+ __field(u32, port)
+ __field(u32, prio)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global_seqno = rq->global_seqno;
+ __entry->prio = rq->sched.attr.priority;
+ __entry->port = port;
+ ),
-DEFINE_EVENT(i915_request_hw, i915_request_in,
- TP_PROTO(struct i915_request *rq, unsigned int port),
- TP_ARGS(rq, port)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->prio, __entry->global_seqno,
+ __entry->port)
);
-DEFINE_EVENT(i915_request, i915_request_out,
- TP_PROTO(struct i915_request *rq),
- TP_ARGS(rq)
+TRACE_EVENT(i915_request_out,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, hw_id)
+ __field(u32, ring)
+ __field(u32, ctx)
+ __field(u32, seqno)
+ __field(u32, global_seqno)
+ __field(u32, completed)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global_seqno = rq->global_seqno;
+ __entry->completed = i915_request_completed(rq);
+ ),
+
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
+ __entry->dev, __entry->hw_id, __entry->ring,
+ __entry->ctx, __entry->seqno,
+ __entry->global_seqno, __entry->completed)
);
+
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
@@ -811,42 +834,6 @@ DEFINE_EVENT(i915_request, i915_request_wait_end,
TP_ARGS(rq)
);
-TRACE_EVENT(i915_flip_request,
- TP_PROTO(int plane, struct drm_i915_gem_object *obj),
-
- TP_ARGS(plane, obj),
-
- TP_STRUCT__entry(
- __field(int, plane)
- __field(struct drm_i915_gem_object *, obj)
- ),
-
- TP_fast_assign(
- __entry->plane = plane;
- __entry->obj = obj;
- ),
-
- TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
-);
-
-TRACE_EVENT(i915_flip_complete,
- TP_PROTO(int plane, struct drm_i915_gem_object *obj),
-
- TP_ARGS(plane, obj),
-
- TP_STRUCT__entry(
- __field(int, plane)
- __field(struct drm_i915_gem_object *, obj)
- ),
-
- TP_fast_assign(
- __entry->plane = plane;
- __entry->obj = obj;
- ),
-
- TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
-);
-
TRACE_EVENT_CONDITION(i915_reg_rw,
TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 51dbfe5bb418..00165ad55fb3 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,8 @@
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
- (long)(x), __func__)
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
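The reworked macro stringifies the expression as well as printing its value, so the WARN identifies the switch operand without relying on __func__. A sketch of the effect (the stray value 4 is illustrative):

	switch (voltage) {
	case VOLTAGE_INFO_0_85V:
		break;
	default:
		MISSING_CASE(voltage);	/* WARNs: "Missing case (voltage == 4)" */
		break;
	}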
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
@@ -120,6 +120,12 @@ static inline u64 ptr_to_u64(const void *ptr)
#include <linux/list.h>
+static inline int list_is_first(const struct list_head *list,
+ const struct list_head *head)
+{
+ return head->next == list;
+}
+
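list_is_first() mirrors the existing list_is_last(); the request hunks above use it both as an assertion in __retire_engine_request() and to detect a ring becoming active in __i915_request_add(). A trivial sketch (the callback is hypothetical):

	if (list_is_first(&rq->ring_link, &ring->request_list))
		ring_became_active(ring);	/* rq is the ring's oldest request */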
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4bda3bd29bf5..9324d476e0a7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -46,8 +46,6 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
GEM_BUG_ON(!i915_gem_object_is_active(obj));
if (--obj->active_count)
@@ -232,7 +230,6 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
if (!vma)
vma = vma_create(obj, vm, view);
- GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
return vma;
@@ -684,13 +681,43 @@ err_unpin:
return ret;
}
-static void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ /*
+ * We defer actually closing, unbinding and destroying the VMA until
+ * the next idle point, or if the object is freed in the meantime. By
+ * postponing the unbind, we allow for it to be resurrected by the
+ * client, avoiding the work required to rebind the VMA. This is
+ * advantageous for DRI, where the client/server pass objects
+ * between themselves, temporarily opening a local VMA to the
+ * object, and then closing it again. The same object is then reused
+ * on the next frame (or two, depending on the depth of the swap queue)
+ * causing us to rebind the VMA once more. This ends up being a lot
+ * of wasted work for the steady state.
+ */
+ list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+ if (vma->flags & I915_VMA_CLOSED) {
+ vma->flags &= ~I915_VMA_CLOSED;
+ list_del(&vma->closed_link);
+ }
+}
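A hedged sketch of the deferred-close flow the comment above describes; the call sequence is illustrative only:

	i915_vma_close(vma);	/* queue on i915->gt.closed_vma, defer the unbind */

	/* If the client reuses the object before the GPU idles ... */
	i915_vma_reopen(vma);	/* ... resurrect it and avoid the rebind */

	/* Otherwise, once the driver parks: */
	i915_vma_parked(i915);	/* unbinds and destroys everything left on closed_vma */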
+
+static void __i915_vma_destroy(struct i915_vma *vma)
{
int i;
GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
GEM_BUG_ON(vma->fence);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
@@ -699,6 +726,7 @@ static void i915_vma_destroy(struct i915_vma *vma)
list_del(&vma->obj_link);
list_del(&vma->vm_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -706,15 +734,30 @@ static void i915_vma_destroy(struct i915_vma *vma)
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
-void i915_vma_close(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (i915_vma_is_closed(vma))
+ list_del(&vma->closed_link);
+
+ WARN_ON(i915_vma_unbind(vma));
+ __i915_vma_destroy(vma);
+}
+
+void i915_vma_parked(struct drm_i915_private *i915)
+{
+ struct i915_vma *vma, *next;
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
+ list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ i915_vma_destroy(vma);
+ }
+
+ GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -804,7 +847,7 @@ int i915_vma_unbind(struct i915_vma *vma)
return -EBUSY;
if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
+ return 0;
GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -841,10 +884,6 @@ int i915_vma_unbind(struct i915_vma *vma)
i915_vma_remove(vma);
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8c5022095418..fc4294cfaa91 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -119,6 +119,8 @@ struct i915_vma {
/** This vma's place in the eviction list */
struct list_head evict_link;
+ struct list_head closed_link;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -285,6 +287,8 @@ void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
@@ -408,6 +412,8 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
+void i915_vma_parked(struct drm_i915_private *i915);
+
#define for_each_until(cond) if (cond) break; else
/**
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index e9fb692076d7..40285d1b91b7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -227,6 +227,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->base.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i, j;
@@ -304,8 +305,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
continue;
}
- plane_state = intel_atomic_get_existing_plane_state(drm_state,
- intel_plane);
+ plane_state = intel_atomic_get_new_plane_state(intel_state,
+ intel_plane);
scaler_id = &plane_state->scaler_id;
}
@@ -328,8 +329,18 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
}
/* set scaler mode */
- if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- scaler_state->scalers[*scaler_id].mode = 0;
+ if ((INTEL_GEN(dev_priv) >= 9) &&
+ plane_state && plane_state->base.fb &&
+ plane_state->base.fb->format->format ==
+ DRM_FORMAT_NV12) {
+ if (INTEL_GEN(dev_priv) == 9 &&
+ !IS_GEMINILAKE(dev_priv) &&
+ !IS_SKYLAKE(dev_priv))
+ scaler_state->scalers[*scaler_id].mode =
+ SKL_PS_SCALER_MODE_NV12;
+ else
+ scaler_state->scalers[*scaler_id].mode =
+ PS_SCALER_MODE_PLANAR;
} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*
* when only 1 scaler is in use on either pipe A or B,
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 7481ce85746b..6d068786eb41 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -183,11 +183,16 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
}
/* FIXME pre-g4x don't work like this */
- if (intel_state->base.visible)
+ if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
+ if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
+ crtc_state->nv12_planes |= BIT(intel_plane->id);
+ else
+ crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
old_plane_state,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 447b721c3be9..54270bdde100 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -530,6 +530,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
*/
if (!driver->drrs_enabled)
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ dev_priv->vbt.psr.enable = driver->psr_enabled;
}
static void
@@ -1215,10 +1216,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{
struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
- uint8_t hdmi_level_shift;
int i, j;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
- uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
@@ -1255,8 +1254,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->aux_channel;
-
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@@ -1270,13 +1267,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = false;
}
- if (port == PORT_A && is_dvi) {
- DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
- is_hdmi ? "/HDMI" : "");
- is_dvi = false;
- is_hdmi = false;
- }
-
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
@@ -1302,6 +1292,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
+ u8 ddc_pin;
+
ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
info->alternate_ddc_pin = ddc_pin;
@@ -1314,14 +1306,14 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
if (is_dp) {
- info->alternate_aux_channel = aux_channel;
+ info->alternate_aux_channel = child->aux_channel;
sanitize_aux_ch(dev_priv, port);
}
if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
- hdmi_level_shift = child->hdmi_level_shifter_value;
+ u8 hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 1f79e7a47433..18e643df523e 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -82,7 +82,7 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- if (drm_debug & DRM_UT_DRIVER) {
+ if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer(__func__);
intel_engine_dump(engine, &p,
@@ -130,11 +130,12 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
- struct intel_engine_cs *engine = from_timer(engine, t,
- breadcrumbs.fake_irq);
+ struct intel_engine_cs *engine =
+ from_timer(engine, t, breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- /* The timer persists in case we cannot enable interrupts,
+ /*
+ * The timer persists in case we cannot enable interrupts,
* or if we have previously seen seqno/interrupt incoherency
* ("missed interrupt" syndrome, better known as a "missed breadcrumb").
* Here the worker will wake up every jiffie in order to kick the
@@ -148,6 +149,12 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
if (!b->irq_armed)
return;
+ /* If the user has disabled the fake-irq, restore the hangchecking */
+ if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
+ mod_timer(&b->hangcheck, wait_timeout());
+ return;
+ }
+
mod_timer(&b->fake_irq, jiffies + 1);
}
@@ -730,10 +737,11 @@ static void insert_signal(struct intel_breadcrumbs *b,
list_add(&request->signaling.link, &iter->signaling.link);
}
-void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
+bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_wait *wait = &request->signaling.wait;
u32 seqno;
/*
@@ -750,12 +758,12 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
seqno = i915_request_global_seqno(request);
if (!seqno) /* will be enabled later upon execution */
- return;
+ return true;
- GEM_BUG_ON(request->signaling.wait.seqno);
- request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.request = request;
- request->signaling.wait.seqno = seqno;
+ GEM_BUG_ON(wait->seqno);
+ wait->tsk = b->signaler;
+ wait->request = request;
+ wait->seqno = seqno;
/*
* Add ourselves into the list of waiters, but registering our
@@ -768,11 +776,15 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
*/
spin_lock(&b->rb_lock);
insert_signal(b, request, seqno);
- wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
+ wakeup &= __intel_engine_add_wait(engine, wait);
spin_unlock(&b->rb_lock);
- if (wakeup)
+ if (wakeup) {
wake_up_process(b->signaler);
+ return !intel_wait_complete(wait);
+ }
+
+ return true;
}
void intel_engine_cancel_signaling(struct i915_request *request)
@@ -826,8 +838,8 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
del_timer_sync(&b->hangcheck);
- del_timer_sync(&b->fake_irq);
clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
@@ -835,15 +847,22 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- cancel_fake_irq(engine);
spin_lock_irq(&b->irq_lock);
+ /*
+ * Leave the fake_irq timer enabled (if it is running), but clear the
+ * bit so that it turns itself off on its next wake up and goes back
+ * to the long hangcheck interval if still required.
+ */
+ clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+
if (b->irq_enabled)
irq_enable(engine);
else
irq_disable(engine);
- /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
+ /*
+ * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
* GPU is active and may have already executed the MI_USER_INTERRUPT
* before the CPU is ready to receive. However, the engine is currently
* idle (we haven't started it yet), there is no possibility for a
@@ -852,9 +871,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- if (b->irq_armed)
- enable_fake_irq(b);
-
spin_unlock_irq(&b->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c0a8805b277f..072b326d5ee0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = true;
return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
pipe_config->has_pch_encoder = true;
@@ -748,6 +768,11 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
+ if (i915_modparams.load_detect_test) {
+ intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ goto load_detect;
+ }
+
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
@@ -776,11 +801,12 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
status = connector_status_disconnected;
goto out;
}
+load_detect:
if (!force) {
status = connector->status;
goto out;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index f9550ea46c26..cf9b600cca79 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -298,7 +298,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (csr->fw_path == i915_modparams.dmc_firmware_path) {
+ /* Bypass version check for firmware override. */
+ required_version = csr->version;
+ } else if (IS_CANNONLAKE(dev_priv)) {
required_version = CNL_CSR_VERSION_REQUIRED;
} else if (IS_GEMINILAKE(dev_priv)) {
required_version = GLK_CSR_VERSION_REQUIRED;
@@ -453,7 +456,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_CANNONLAKE(dev_priv))
+ if (i915_modparams.dmc_firmware_path)
+ csr->fw_path = i915_modparams.dmc_firmware_path;
+ else if (IS_CANNONLAKE(dev_priv))
csr->fw_path = I915_CSR_CNL;
else if (IS_GEMINILAKE(dev_priv))
csr->fw_path = I915_CSR_GLK;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8c2d778560f0..f4a8598a2d39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -493,6 +493,125 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
+struct icl_combo_phy_ddi_buf_trans {
+ u32 dw2_swing_select;
+ u32 dw2_swing_scalar;
+ u32 dw4_scaling;
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
+ /* Voltage mV db */
+ { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
+ { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
+ { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
+ { 0x2, 0x98, 0x900F }, /* 400 9.5 */
+ { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
+ { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
+ { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
+ { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
+ { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
+ { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 0.85V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
+ /* Voltage mV db */
+ { 0x0, 0x00, 0x00 }, /* 200 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 200 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 6.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 250 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
+ /* Voltage mV db */
+ { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
+ { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
+ { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
+ { 0x2, 0x98, 0x900F }, /* 400 9.5 */
+ { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
+ { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
+ { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
+ { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
+ { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
+ { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 0.95V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
+ /* Voltage mV db */
+ { 0x0, 0x00, 0x00 }, /* 200 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 200 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 6.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 250 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
+ /* Voltage mV db */
+ { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
+ { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
+ { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
+ { 0x2, 0x98, 0x900F }, /* 400 9.5 */
+ { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
+ { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
+ { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
+ { 0x5, 0x71, 0x0018 }, /* 800 0.0 */
+ { 0x5, 0x71, 0x3015 }, /* 800 3.5 */
+ { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 1.05V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
+ /* Voltage mV db */
+ { 0x0, 0x00, 0x00 }, /* 200 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 200 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 200 6.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 250 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 250 4.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 0.0 */
+ { 0x0, 0x00, 0x00 }, /* 300 1.5 */
+ { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+};
+
+struct icl_mg_phy_ddi_buf_trans {
+ u32 cri_txdeemph_override_5_0;
+ u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_17_12;
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x0, 0x1B, 0x00 }, /* 0 0 */
+ { 0x0, 0x23, 0x08 }, /* 0 1 */
+ { 0x0, 0x2D, 0x12 }, /* 0 2 */
+ { 0x0, 0x00, 0x00 }, /* 0 3 */
+ { 0x0, 0x23, 0x00 }, /* 1 0 */
+ { 0x0, 0x2B, 0x09 }, /* 1 1 */
+ { 0x0, 0x2E, 0x11 }, /* 1 2 */
+ { 0x0, 0x2F, 0x00 }, /* 2 0 */
+ { 0x0, 0x33, 0x0C }, /* 2 1 */
+ { 0x0, 0x00, 0x00 }, /* 3 0 */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -751,6 +870,45 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
+static const struct icl_combo_phy_ddi_buf_trans *
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
+ int type, int *n_entries)
+{
+ u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
+
+ if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+ switch (voltage) {
+ case VOLTAGE_INFO_0_85V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
+ return icl_combo_phy_ddi_translations_edp_0_85V;
+ case VOLTAGE_INFO_0_95V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
+ return icl_combo_phy_ddi_translations_edp_0_95V;
+ case VOLTAGE_INFO_1_05V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
+ return icl_combo_phy_ddi_translations_edp_1_05V;
+ default:
+ MISSING_CASE(voltage);
+ return NULL;
+ }
+ } else {
+ switch (voltage) {
+ case VOLTAGE_INFO_0_85V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
+ return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
+ case VOLTAGE_INFO_0_95V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
+ return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
+ case VOLTAGE_INFO_1_05V:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
+ return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
+ default:
+ MISSING_CASE(voltage);
+ return NULL;
+ }
+ }
+}
+
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
int n_entries, level, default_entry;
@@ -875,7 +1033,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
- switch (pll->id) {
+ switch (pll->info->id) {
case DPLL_ID_WRPLL1:
return PORT_CLK_SEL_WRPLL1;
case DPLL_ID_WRPLL2:
@@ -889,11 +1047,30 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
case DPLL_ID_LCPLL_2700:
return PORT_CLK_SEL_LCPLL_2700;
default:
- MISSING_CASE(pll->id);
+ MISSING_CASE(pll->info->id);
return PORT_CLK_SEL_NONE;
}
}
+static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
+ const struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+
+ switch (id) {
+ default:
+ MISSING_CASE(id);
+ case DPLL_ID_ICL_DPLL0:
+ case DPLL_ID_ICL_DPLL1:
+ return DDI_CLK_SEL_NONE;
+ case DPLL_ID_ICL_MGPLL1:
+ case DPLL_ID_ICL_MGPLL2:
+ case DPLL_ID_ICL_MGPLL3:
+ case DPLL_ID_ICL_MGPLL4:
+ return DDI_CLK_SEL_MG;
+ }
+}
+
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
* both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -1906,7 +2083,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum port port = encoder->port;
int n_entries;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (port == PORT_A || port == PORT_B)
+ icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+ &n_entries);
+ else
+ n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ } else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
cnl_get_buf_trans_edp(dev_priv, &n_entries);
else
@@ -2063,6 +2246,146 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
}
+static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type)
+{
+ const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+ u32 n_entries, val;
+ int ln;
+
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
+ &n_entries);
+ if (!ddi_translations)
+ return;
+
+ if (level >= n_entries) {
+ DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.\n", level, n_entries - 1);
+ level = n_entries - 1;
+ }
+
+ /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val &= ~RTERM_SELECT_MASK;
+ val |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+ /* Program PORT_TX_DW5 */
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ /*
+ * Set DisableTap2 and DisableTap3 if MIPI DSI.
+ * Clear DisableTap2 and DisableTap3 for all other Ports.
+ */
+ if (type == INTEL_OUTPUT_DSI) {
+ val |= TAP2_DISABLE;
+ val |= TAP3_DISABLE;
+ } else {
+ val &= ~TAP2_DISABLE;
+ val &= ~TAP3_DISABLE;
+ }
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+ /* Program PORT_TX_DW2 */
+ val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
+ val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+ /* Program Rcomp scalar for every table entry */
+ val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
+
+ /* Program PORT_TX_DW4 */
+ /* We cannot write to GRP. It would overwrite individual loadgen. */
+ for (ln = 0; ln <= 3; ln++) {
+ val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ val |= ddi_translations[level].dw4_scaling;
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ }
+}
+
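Note the recurring pattern above: each field is read from the lane-0 instance of the register (..._LN0), modified, and written back through the group (..._GRP) address so the hardware broadcasts it to all four lanes; only DW4 is written lane by lane because a group write would clobber the per-lane loadgen bits. A small user-space sketch of that read-modify-write idiom, with an invented field layout rather than the real one:

    #include <stdint.h>
    #include <stdio.h>

    #define RTERM_SELECT_MASK	(0x7u << 3)	/* invented field position */
    #define RTERM_SELECT(x)		((uint32_t)(x) << 3)

    static uint32_t lane_regs[4];	/* stand-ins for the per-lane registers */

    int main(void)
    {
    	/* read lane 0, patch only the field of interest... */
    	uint32_t val = lane_regs[0];

    	val &= ~RTERM_SELECT_MASK;
    	val |= RTERM_SELECT(0x6);

    	/* ...then "broadcast", as a GRP write would, to every lane */
    	for (int ln = 0; ln < 4; ln++)
    		lane_regs[ln] = val;

    	printf("lane3 = 0x%08x\n", lane_regs[3]);
    	return 0;
    }
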
+static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+ u32 level,
+ enum intel_output_type type)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int width = 0;
+ int rate = 0;
+ u32 val;
+ int ln = 0;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ width = 4;
+ /* Rate is always less than 6GHz for HDMI */
+ } else {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ width = intel_dp->lane_count;
+ rate = intel_dp->link_rate;
+ }
+
+ /*
+ * 1. If port type is eDP or DP,
+ * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
+ * else clear to 0b.
+ */
+ val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ if (type == INTEL_OUTPUT_HDMI)
+ val &= ~COMMON_KEEPER_EN;
+ else
+ val |= COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
+
+ /* 2. Program loadgen select */
+ /*
+ * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
+ * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
+ * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
+ * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
+ */
+ for (ln = 0; ln <= 3; ln++) {
+ val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val &= ~LOADGEN_SELECT;
+
+ if ((rate <= 600000 && width == 4 && ln >= 1) ||
+ (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
+ val |= LOADGEN_SELECT;
+ }
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ }
+
+ /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= SUS_CLOCK_CONFIG;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+
+ /* 4. Clear training enable to change swing values */
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+ /* 5. Program swing and de-emphasis */
+ icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+
+ /* 6. Set training enable to trigger update */
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+}
+
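Step 2 above is a direct encoding of the table in its comment: at bit rates up to 6 GHz, lanes 1-3 get loadgen with a 4-lane link and only lanes 1-2 with a 1- or 2-lane link; above 6 GHz no lane does. The rule can be checked in isolation (rates in kHz, as in the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* loadgen select per the comment above; ln is the lane index 0..3 */
    static bool loadgen(int rate, int width, int ln)
    {
    	if (rate > 600000)		/* > 6 GHz: no lane */
    		return false;
    	if (width == 4)
    		return ln >= 1;		/* LN1, LN2, LN3 */
    	return ln == 1 || ln == 2;	/* LN1, LN2 only */
    }

    int main(void)
    {
    	for (int ln = 0; ln <= 3; ln++)
    		printf("HBR2 x4, LN%d: %d\n", ln, loadgen(540000, 4, ln));
    	return 0;
    }
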
+static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
+ enum intel_output_type type)
+{
+ enum port port = encoder->port;
+
+ if (port == PORT_A || port == PORT_B)
+ icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+ else
+ /* Not Implemented Yet */
+ WARN_ON(1);
+}
+
static uint32_t translate_signal_level(int signal_levels)
{
int i;
@@ -2094,7 +2417,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
else
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
@@ -2115,6 +2440,71 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level);
}
+void icl_map_plls_to_ports(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ enum port port;
+ uint32_t val;
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ port = encoder->port;
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & DPCLKA_CFGCR0_DDI_CLK_OFF(port)) == 0);
+
+ if (port == PORT_A || port == PORT_B) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+ }
+
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+ }
+}
+
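The map/unmap pair implements a small gating protocol on DPCLKA_CFGCR0: the clock for a port must still be gated (CLK_OFF set) when mapping starts, the PLL select field is programmed first for the combo ports, and only then is CLK_OFF cleared; unmapping simply sets CLK_OFF again. A sketch of that ordering with an invented bit layout, not the real register definition:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CLK_OFF(port)		(1u << (port))			/* invented layout */
    #define CLK_SEL_MASK(port)	(0x3u << (8 + 2 * (port)))
    #define CLK_SEL(pll, port)	((uint32_t)(pll) << (8 + 2 * (port)))

    int main(void)
    {
    	uint32_t cfgcr0 = CLK_OFF(0) | CLK_OFF(1);	/* everything gated */
    	int port = 0, pll = 1;

    	assert(cfgcr0 & CLK_OFF(port));	/* mapping a live clock is a bug */
    	cfgcr0 &= ~CLK_SEL_MASK(port);	/* select the PLL while still gated... */
    	cfgcr0 |= CLK_SEL(pll, port);
    	cfgcr0 &= ~CLK_OFF(port);	/* ...then ungate the DDI clock */
    	printf("cfgcr0 = 0x%08x\n", cfgcr0);
    	return 0;
    }
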
+void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+ enum port port;
+
+ if (old_conn_state->crtc != crtc)
+ continue;
+
+ port = encoder->port;
+ mutex_lock(&dev_priv->dpll_lock);
+ I915_WRITE(DPCLKA_CFGCR0_ICL,
+ I915_READ(DPCLKA_CFGCR0_ICL) |
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ mutex_unlock(&dev_priv->dpll_lock);
+ }
+}
+
static void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
@@ -2127,11 +2517,15 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (port >= PORT_C)
+ I915_WRITE(DDI_CLK_SEL(port),
+ icl_pll_to_ddi_pll_sel(encoder, pll));
+ } else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
I915_WRITE(DPCLKA_CFGCR0, val);
/*
@@ -2148,7 +2542,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
- val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
+ val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
I915_WRITE(DPLL_CTRL2, val);
@@ -2165,14 +2559,18 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv)) {
+ if (port >= PORT_C)
+ I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ } else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- else if (IS_GEN9_BC(dev_priv))
+ } else if (IS_GEN9_BC(dev_priv)) {
I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port));
- else if (INTEL_GEN(dev_priv) < 9)
+ } else if (INTEL_GEN(dev_priv) < 9) {
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ }
}
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
@@ -2197,7 +2595,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
@@ -2205,7 +2605,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
@@ -2227,7 +2628,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+ else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
@@ -2303,12 +2706,15 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_dp *intel_dp = &dig_port->dp;
+ bool is_mst = intel_crtc_has_type(old_crtc_state,
+ INTEL_OUTPUT_DP_MST);
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
*/
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_disable_ddi_buf(encoder);
@@ -2424,12 +2830,14 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- intel_hdmi_handle_sink_scrambling(encoder,
- conn_state->connector,
- crtc_state->hdmi_high_tmds_clock_ratio,
- crtc_state->hdmi_scrambling);
+ if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+ crtc_state->hdmi_high_tmds_clock_ratio,
+ crtc_state->hdmi_scrambling))
+ DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -2520,13 +2928,16 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_connector *connector = old_conn_state->connector;
+
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
- intel_hdmi_handle_sink_scrambling(encoder,
- old_conn_state->connector,
- false, false);
+ if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+ false, false))
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
}
static void intel_disable_ddi(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 3dd350f7b8e6..0fd13df424cf 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -83,11 +83,11 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
int s;
- drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
- drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
- for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) {
- drm_printf(p, "slice%d %u subslices mask=%04x\n",
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
s, hweight8(sseu->subslice_mask[s]),
sseu->subslice_mask[s]);
}
@@ -158,6 +158,45 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
+static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ u8 s_en;
+ u32 ss_en, ss_en_mask;
+ u8 eu_en;
+ int s;
+
+ sseu->max_slices = 1;
+ sseu->max_subslices = 8;
+ sseu->max_eus_per_subslice = 8;
+
+ s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+ ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
+ ss_en_mask = BIT(sseu->max_subslices) - 1;
+ eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (s_en & BIT(s)) {
+ int ss_idx = sseu->max_subslices * s;
+ int ss;
+
+ sseu->slice_mask |= BIT(s);
+ sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu->subslice_mask[s] & BIT(ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ }
+ }
+ sseu->eu_per_subslice = hweight8(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* ICL has no power gating restrictions. */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
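Note the mixed polarity of the gen11 fuses above: the slice register reports enabled slices while the subslice and EU registers report disabled units, hence the inversions before masking down to the field width. The decode, reduced to plain integer math with made-up fuse values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const int max_subslices = 8;
    	uint8_t s_en = 0x1;		/* slice 0 enabled (made-up value) */
    	uint32_t ss_disable = 0xc0;	/* subslices 6 and 7 fused off */
    	uint32_t ss_en_mask = (1u << max_subslices) - 1;
    	uint32_t ss_en = ~ss_disable & ss_en_mask;

    	if (s_en & 1)
    		printf("slice0 subslice mask: 0x%02x\n", ss_en);	/* 0x3f */
    	return 0;
    }
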
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -557,6 +596,52 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
return base_freq + frac_freq;
}
+static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200;
+ u32 f24_mhz = 24000;
+ u32 crystal_clock = (rpm_config_reg &
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200;
+ u32 f24_mhz = 24000;
+ u32 f25_mhz = 25000;
+ u32 f38_4_mhz = 38400;
+ u32 crystal_clock = (rpm_config_reg &
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
u32 f12_5_mhz = 12500;
@@ -597,10 +682,9 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
}
return freq;
- } else if (INTEL_GEN(dev_priv) <= 10) {
+ } else if (INTEL_GEN(dev_priv) <= 11) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
- u32 rpm_config_reg = 0;
/* First figure out the reference frequency. There are 2 ways
* we can compute the frequency, either through the
@@ -610,20 +694,14 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(dev_priv);
} else {
- u32 crystal_clock;
-
- rpm_config_reg = I915_READ(RPM_CONFIG0);
- crystal_clock = (rpm_config_reg &
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- freq = f19_2_mhz;
- break;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- freq = f24_mhz;
- break;
- }
+ u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
+
+ if (INTEL_GEN(dev_priv) <= 10)
+ freq = gen10_get_crystal_clock_freq(dev_priv,
+ rpm_config_reg);
+ else
+ freq = gen11_get_crystal_clock_freq(dev_priv,
+ rpm_config_reg);
/* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
@@ -768,8 +846,10 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
broadwell_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) == 9)
gen9_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 10)
+ else if (INTEL_GEN(dev_priv) == 10)
gen10_sseu_info_init(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ gen11_sseu_info_init(dev_priv);
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
@@ -780,3 +860,50 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
{
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
+
+/*
+ * Determine which engines are fused off in our particular hardware. Since the
+ * fuse register is in the blitter powerwell, we need forcewake to be ready at
+ * this point (but later we need to prune the forcewake domains for engines that
+ * are indeed fused off).
+ */
+void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ u8 vdbox_disable, vebox_disable;
+ u32 media_fuse;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return;
+
+ media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+
+ vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ continue;
+
+ if (!(BIT(i) & vdbox_disable))
+ continue;
+
+ info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ }
+
+ DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ continue;
+
+ if (!(BIT(i) & vebox_disable))
+ continue;
+
+ info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ }
+}
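The two loops prune fused-off engines out of ring_mask so the rest of the driver never tries to initialize them. The same pruning, as a self-contained example with invented fuse values:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VCS 4

    int main(void)
    {
    	uint32_t ring_mask = 0xf;	/* pretend 4 VCS engines in the mask */
    	uint8_t vdbox_disable = 0x4;	/* fuse says vcs2 is gone (made up) */

    	for (int i = 0; i < MAX_VCS; i++) {
    		if (!(ring_mask & (1u << i)))	/* not present on this SKU */
    			continue;
    		if (!(vdbox_disable & (1u << i)))
    			continue;
    		ring_mask &= ~(1u << i);
    		printf("vcs%d fused off\n", i);
    	}
    	printf("ring_mask = 0x%x\n", ring_mask);	/* 0xb */
    	return 0;
    }
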
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 0835752c8b22..933e31669557 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -114,7 +114,7 @@ enum intel_platform {
func(has_ipc);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
-#define GEN_MAX_SUBSLICES (7)
+#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
struct sseu_dev_info {
u8 slice_mask;
@@ -247,6 +247,8 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p);
+void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
+
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56004ffbd8bb..2cc6faa1daa8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -88,6 +88,22 @@ static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_VYUY,
};
+static const uint32_t skl_pri_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+};
+
static const uint64_t skl_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
@@ -488,6 +504,33 @@ static const struct intel_limit intel_limits_bxt = {
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
+static void
+skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
+{
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
+ if (enable)
+ I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
+ else
+ I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
+}
+
+static void
+skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+{
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
+ if (enable)
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ else
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) &
+ ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+}
+
static bool
needs_modeset(const struct drm_crtc_state *state)
{
@@ -2657,11 +2700,13 @@ static int i9xx_format_to_fourcc(int format)
}
}
-static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
case PLANE_CTL_FORMAT_RGB_565:
return DRM_FORMAT_RGB565;
+ case PLANE_CTL_FORMAT_NV12:
+ return DRM_FORMAT_NV12;
default:
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order) {
@@ -2824,7 +2869,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (intel_plane_ggtt_offset(state) == plane_config->base) {
- fb = c->primary->fb;
+ fb = state->base.fb;
drm_framebuffer_get(fb);
goto valid_fb;
}
@@ -2858,6 +2903,9 @@ valid_fb:
return;
}
+ obj = intel_fb_obj(fb);
+ intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
@@ -2871,7 +2919,6 @@ valid_fb:
intel_state->base.src = drm_plane_state_src(plane_state);
intel_state->base.dst = drm_plane_state_dest(plane_state);
- obj = intel_fb_obj(fb);
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
@@ -3071,6 +3118,29 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
return 0;
}
+static int
+skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ /* Display WA #1106 */
+ if (plane_state->base.rotation !=
+ (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
+ plane_state->base.rotation != DRM_MODE_ROTATE_270)
+ return 0;
+
+ /*
+ * The src coordinates are rotated here, so we check the
+ * height but report it to userspace as the width.
+ */
+ if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
+ DRM_DEBUG_KMS("src width must be multiple "
+ "of 4 for rotated NV12\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
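Since the plane src rectangle is kept in 16.16 fixed point and has already been rotated at this point, the check shifts the height down by 16 bits and applies the multiple-of-4 rule to it. A quick numeric illustration:

    #include <stdio.h>

    int main(void)
    {
    	/* drm_rect-style 16.16 fixed-point height, here 1080 << 16 */
    	unsigned int src_h_fixed = 1080u << 16;
    	unsigned int src_h = src_h_fixed >> 16;

    	/* 1080 % 4 == 0, so this rotated NV12 source passes WA #1106 */
    	printf("height %u -> %s\n", src_h, (src_h % 4) ? "-EINVAL" : "ok");
    	return 0;
    }
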
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3154,6 +3224,9 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
* the main surface setup depends on it.
*/
if (fb->format->format == DRM_FORMAT_NV12) {
+ ret = skl_check_nv12_surface(crtc_state, plane_state);
+ if (ret)
+ return ret;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3464,6 +3537,8 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
case DRM_FORMAT_VYUY:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ case DRM_FORMAT_NV12:
+ return PLANE_CTL_FORMAT_NV12;
default:
MISSING_CASE(pixel_format);
}
@@ -3602,11 +3677,15 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
u32 plane_color_ctl = 0;
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ if (INTEL_GEN(dev_priv) < 11) {
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ }
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
@@ -3675,7 +3754,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state;
int ret;
-
/* reset doesn't touch the display */
if (!i915_modparams.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
@@ -3729,19 +3807,17 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
- struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ struct drm_atomic_state *state;
int ret;
/* reset doesn't touch the display */
- if (!i915_modparams.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
+ if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
return;
+ state = fetch_and_zero(&dev_priv->modeset_restore_state);
if (!state)
goto unlock;
- dev_priv->modeset_restore_state = NULL;
-
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
/* for testing only restore the display */
@@ -4703,7 +4779,9 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
- int src_w, int src_h, int dst_w, int dst_h)
+ int src_w, int src_h, int dst_w, int dst_h,
+ bool plane_scaler_check,
+ uint32_t pixel_format)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
@@ -4721,6 +4799,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
need_scaling = src_w != dst_w || src_h != dst_h;
+ if (plane_scaler_check)
+ if (pixel_format == DRM_FORMAT_NV12)
+ need_scaling = true;
+
if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
need_scaling = true;
@@ -4760,12 +4842,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
+ if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+ (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
+ DRM_DEBUG_KMS("NV12: src dimensions not met\n");
+ return -EINVAL;
+ }
+
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-
- src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
+ dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+ (IS_GEN11(dev_priv) &&
+ (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+ dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+ (!IS_GEN11(dev_priv) &&
+ (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+ dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
@@ -4796,9 +4887,10 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id,
- state->pipe_src_w, state->pipe_src_h,
- adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+ &state->scaler_state.scaler_id,
+ state->pipe_src_w, state->pipe_src_h,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_vdisplay, false, 0);
}
/**
@@ -4827,7 +4919,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst));
+ drm_rect_height(&plane_state->base.dst),
+ fb ? true : false, fb ? fb->format->format : 0);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -4853,6 +4946,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
break;
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -5096,16 +5190,34 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
return !old_crtc_state->ips_enabled;
}
+static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->nv12_planes)
+ return false;
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
+ IS_CANNONLAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
crtc);
struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_pri_state =
- drm_atomic_get_existing_plane_state(old_state, primary);
+ struct drm_plane_state *old_primary_state =
+ drm_atomic_get_old_plane_state(old_state, primary);
intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
@@ -5115,20 +5227,24 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
hsw_enable_ips(pipe_config);
- if (old_pri_state) {
- struct intel_plane_state *primary_state =
- intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
- to_intel_plane(primary));
- struct intel_plane_state *old_primary_state =
- to_intel_plane_state(old_pri_state);
+ if (old_primary_state) {
+ struct drm_plane_state *new_primary_state =
+ drm_atomic_get_new_plane_state(old_state, primary);
intel_fbc_post_update(crtc);
- if (primary_state->base.visible &&
+ if (new_primary_state->visible &&
(needs_modeset(&pipe_config->base) ||
- !old_primary_state->base.visible))
+ !old_primary_state->visible))
intel_post_enable_primary(&crtc->base, pipe_config);
}
+
+ /* Display WA 827 */
+ if (needs_nv12_wa(dev_priv, old_crtc_state) &&
+ !needs_nv12_wa(dev_priv, pipe_config)) {
+ skl_wa_clkgate(dev_priv, crtc->pipe, false);
+ skl_wa_528(dev_priv, crtc->pipe, false);
+ }
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5139,8 +5255,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_pri_state =
- drm_atomic_get_existing_plane_state(old_state, primary);
+ struct drm_plane_state *old_primary_state =
+ drm_atomic_get_old_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
@@ -5148,23 +5264,28 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
hsw_disable_ips(old_crtc_state);
- if (old_pri_state) {
- struct intel_plane_state *primary_state =
+ if (old_primary_state) {
+ struct intel_plane_state *new_primary_state =
intel_atomic_get_new_plane_state(old_intel_state,
to_intel_plane(primary));
- struct intel_plane_state *old_primary_state =
- to_intel_plane_state(old_pri_state);
- intel_fbc_pre_update(crtc, pipe_config, primary_state);
+ intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN2(dev_priv) && old_primary_state->base.visible &&
- (modeset || !primary_state->base.visible))
+ if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+ (modeset || !new_primary_state->base.visible))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
}
+ /* Display WA 827 */
+ if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
+ needs_nv12_wa(dev_priv, pipe_config)) {
+ skl_wa_clkgate(dev_priv, crtc->pipe, true);
+ skl_wa_528(dev_priv, crtc->pipe, true);
+ }
+
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
@@ -5499,6 +5620,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(crtc, pipe_config, old_state);
+
if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5696,6 +5820,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_disable_pipe_clock(intel_crtc->config);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -8766,8 +8893,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = pipe_config->shared_dpll;
- WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -9243,8 +9370,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
pll = pipe_config->shared_dpll;
if (pll) {
- WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
}
/*
@@ -9974,6 +10101,8 @@ found:
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+ if (!ret)
+ ret = drm_atomic_add_affected_planes(restore_state, crtc);
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
@@ -10773,7 +10902,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- connector_state = drm_atomic_get_existing_connector_state(state, connector);
+ connector_state = drm_atomic_get_new_connector_state(state, connector);
if (!connector_state)
connector_state = connector->state;
@@ -11085,39 +11214,42 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
-#define PIPE_CONF_CHECK_X(name) \
+#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_I(name) \
+#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_BOOL(name) \
+#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
"(expected %s, found %s)\n", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
- }
+ } \
+} while (0)
/*
* Checks state where we only read out the enabling, but not the entire
* state itself (like full infoframes or ELD for audio). These states
* require a full modeset on bootup to fix up.
*/
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
@@ -11126,18 +11258,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_P(name) \
+#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
"(expected %p, found %p)\n", \
current_config->name, \
pipe_config->name); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_M_N(name) \
+#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
adjust)) { \
@@ -11155,14 +11289,15 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
- }
+ } \
+} while (0)
/* This is required for BDW+ where there is only one set of registers for
* switching between high and low RR.
* This macro can be used whenever a comparison has to be made between one
* hw state and multiple sw state variables.
*/
-#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name, adjust) && \
!intel_compare_link_m_n(&current_config->alt_name, \
@@ -11187,9 +11322,10 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_FLAGS(name, mask) \
+#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
pipe_config_err(adjust, __stringify(name), \
"(%x) (expected %i, found %i)\n", \
@@ -11197,16 +11333,18 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
current_config->name & (mask), \
pipe_config->name & (mask)); \
ret = false; \
- }
+ } \
+} while (0)
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
ret = false; \
- }
+ } \
+} while (0)
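The do { ... } while (0) wrapper added to every PIPE_CONF_CHECK_* macro is the standard way to make a multi-statement macro expand to exactly one statement, so it composes safely with an unbraced if/else at the call site. A minimal demonstration of the hazard the wrapper prevents:

    #include <stdio.h>

    /* Without do/while(0), a braced macro body plus the caller's ';'
     * would orphan the `else` below and break compilation. */
    #define CHECK(a, b) do { \
    	if ((a) != (b)) { \
    		printf("mismatch: %d != %d\n", (a), (b)); \
    		ok = 0; \
    	} \
    } while (0)

    int main(void)
    {
    	int ok = 1;

    	if (ok)
    		CHECK(1, 2);	/* expands safely as a single statement */
    	else
    		printf("never reached\n");
    	printf("ok = %d\n", ok);
    	return 0;
    }
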
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -11315,6 +11453,16 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@@ -11378,6 +11526,11 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ if (INTEL_GEN(dev_priv) >= 11)
+ if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
+ DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+ sw_ddb->enabled_slices,
+ hw_ddb.enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
hw_plane_wm = &hw_wm.planes[plane];
@@ -11643,11 +11796,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- DRM_DEBUG_KMS("%s\n", pll->name);
+ DRM_DEBUG_KMS("%s\n", pll->info->name);
- active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
+ active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
- if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+ if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
I915_STATE_WARN(!pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
I915_STATE_WARN(pll->on && !pll->active_mask,
@@ -12136,20 +12289,23 @@ static void intel_update_crtc(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
bool modeset = needs_modeset(new_crtc_state);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
+ to_intel_plane(crtc->primary));
if (modeset) {
update_scanline_offset(intel_crtc);
dev_priv->display.crtc_enable(pipe_config, state);
+
+ /* vblanks work again, re-enable pipe CRC. */
+ intel_crtc_enable_pipe_crc(intel_crtc);
} else {
intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
pipe_config);
}
- if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
- intel_fbc_enable(
- intel_crtc, pipe_config,
- to_intel_plane_state(crtc->primary->state));
- }
+ if (new_plane_state)
+ intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
}
@@ -12181,6 +12337,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
bool progress;
enum pipe pipe;
int i;
+ u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
@@ -12189,6 +12347,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (new_crtc_state->active)
entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ /* If a 2nd DBuf slice is required, enable it here */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
+
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
@@ -12239,6 +12401,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
progress = true;
}
} while (progress);
+
+ /* If the 2nd DBuf slice is no longer required, disable it */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -12320,6 +12486,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (old_crtc_state->active) {
intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(intel_crtc);
+
dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
@@ -12695,6 +12868,15 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_unpin_fb_vma(vma, old_plane_state->flags);
}
+static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_DISPLAY,
+ };
+
+ i915_gem_object_wait_priority(obj, 0, &attr);
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@@ -12723,8 +12905,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (old_obj) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(new_state->state,
- plane->state->crtc);
+ drm_atomic_get_new_crtc_state(new_state->state,
+ plane->state->crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -12771,13 +12953,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+ fb_obj_bump_render_priority(obj);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
+ intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
if (!new_state->fence) { /* implicit fencing */
struct dma_fence *fence;
@@ -12822,11 +13006,13 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
}
int
-skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
+skl_max_scale(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
+ uint32_t pixel_format)
{
struct drm_i915_private *dev_priv;
- int max_scale;
- int crtc_clock, max_dotclk;
+ int max_scale, mult;
+ int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
@@ -12848,8 +13034,10 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
* or
* cdclk/crtc_clock
*/
- max_scale = min((1 << 16) * 3 - 1,
- (1 << 8) * ((max_dotclk << 8) / crtc_clock));
+ mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
+ tmpclk1 = (1 << 16) * mult - 1;
+ tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
+ max_scale = min(tmpclk1, tmpclk2);
return max_scale;
}
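The scale limit math above works in 16.16 fixed point: the first term caps the plane downscale ratio (3x normally, 2x for NV12), the second caps it by the cdclk/crtc_clock ratio. Worked numbers with a made-up cdclk:

    #include <stdio.h>

    int main(void)
    {
    	int max_dotclk = 648000;	/* kHz, made-up max cdclk */
    	int crtc_clock = 148500;	/* kHz, 1080p60 pixel clock */
    	int mult = 3;			/* would be 2 for NV12 */

    	int tmpclk1 = (1 << 16) * mult - 1;	/* 3.0x in 16.16 */
    	int tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
    	int max_scale = tmpclk1 < tmpclk2 ? tmpclk1 : tmpclk2;

    	/* 196607 vs 285952: the 3x ratio limit wins here */
    	printf("max_scale = %d (%.2fx)\n", max_scale, max_scale / 65536.0);
    	return 0;
    }
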
@@ -12865,12 +13053,16 @@ intel_check_primary_plane(struct intel_plane *plane,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
int ret;
+ uint32_t pixel_format = 0;
if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (!state->ckey.flags) {
min_scale = 1;
- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ if (state->base.fb)
+ pixel_format = state->base.fb->format->format;
+ max_scale = skl_max_scale(to_intel_crtc(crtc),
+ crtc_state, pixel_format);
}
can_position = true;
}
@@ -12943,10 +13135,25 @@ out:
intel_cstate);
}
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!IS_GEN2(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
@@ -12957,17 +13164,8 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
- old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
- if (!IS_GEN2(dev_priv))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
-
- if (new_crtc_state->has_pch_encoder) {
- enum pipe pch_transcoder =
- intel_crtc_pch_transcoder(intel_crtc);
-
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
- }
- }
+ old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
}
/**
@@ -13031,6 +13229,7 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
@@ -13165,8 +13364,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_unlock;
- old_fb = old_plane_state->fb;
+ intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
+ old_fb = old_plane_state->fb;
i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
intel_plane->frontbuffer_bit);
@@ -13237,6 +13437,30 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
}
+bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (plane_id == PLANE_PRIMARY) {
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+ else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
+ !IS_GEMINILAKE(dev_priv))
+ return false;
+ } else if (plane_id >= PLANE_SPRITE0) {
+ if (plane_id == PLANE_CURSOR)
+ return false;
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
+ if (plane_id != PLANE_SPRITE0)
+ return false;
+ } else {
+ if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
+ IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+ }
+ }
+ return true;
+}
+
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -13297,8 +13521,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
- intel_primary_formats = skl_primary_formats;
- num_formats = ARRAY_SIZE(skl_primary_formats);
+ if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
+ intel_primary_formats = skl_pri_planar_formats;
+ num_formats = ARRAY_SIZE(skl_pri_planar_formats);
+ } else {
+ intel_primary_formats = skl_primary_formats;
+ num_formats = ARRAY_SIZE(skl_primary_formats);
+ }
if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
modifiers = skl_format_modifiers_ccs;
@@ -13553,10 +13782,17 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
/* initialize shared scalers */
intel_crtc_init_scalers(intel_crtc, crtc_state);
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc;
- dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+ dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+ dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+ BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+ }
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -14112,6 +14348,20 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
break;
+ case DRM_FORMAT_NV12:
+ if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
+ mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
+ goto err;
+ }
+ if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv)) {
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+ goto err;
+ }
+ break;
default:
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
@@ -14124,6 +14374,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+ if (fb->format->format == DRM_FORMAT_NV12 &&
+ (fb->width < SKL_MIN_YUV_420_SRC_W ||
+ fb->height < SKL_MIN_YUV_420_SRC_H ||
+ (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
+ DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < fb->format->num_planes; i++) {
u32 stride_alignment;
@@ -14211,12 +14469,22 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ /*
+ * Can't reject DBLSCAN here because Xorg ddxen can add piles
+ * of DBLSCAN modes to the output's mode list when they detect
+ * the scaling mode property on the connector. And they don't
+ * ask the kernel to validate those modes in any way until
+ * modeset time at which point the client gets a protocol error.
+ * So in order to not upset those clients we silently ignore the
+ * DBLSCAN flag on such connectors. For other connectors we will
+ * reject modes with the DBLSCAN flag in encoder->compute_config().
+ * And we always reject DBLSCAN modes in connector->mode_valid()
+ * as we never want such modes on the connector's mode list.
+ */
+
if (mode->vscan > 1)
return MODE_NO_VSCAN;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->flags & DRM_MODE_FLAG_HSKEW)
return MODE_H_ILLEGAL;
@@ -15101,8 +15369,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->funcs.get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
+ pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pll->state.hw_state);
pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -15115,7 +15383,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pll->active_mask = pll->state.crtc_mask;
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->name, pll->state.crtc_mask, pll->on);
+ pll->info->name, pll->state.crtc_mask, pll->on);
}
for_each_intel_encoder(dev, encoder) {
@@ -15291,9 +15559,10 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
if (!pll->on || pll->active_mask)
continue;
- DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+ DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
+ pll->info->name);
- pll->funcs.disable(dev_priv, pll);
+ pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4e7418b345bc..2ef31617614a 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -218,6 +218,10 @@ struct intel_link_m_n {
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if((__mask) & BIT(__p))
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
+ for_each_if ((__mask) & (1 << (__t)))
+
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b7b4cfdeb974..16faea30114a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -43,7 +43,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
#define DP_DPRX_ESI_LEN 14
/* Compliance test status bits */
@@ -92,8 +91,6 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
{ 270000, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
- { 540000, /* m2_int = 27, m2_fraction = 0 */
- { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/**
@@ -423,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1650,9 +1650,17 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
+struct link_config_limits {
+ int min_clock, max_clock;
+ int min_lane_count, max_lane_count;
+ int min_bpp, max_bpp;
+};
+
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int bpp, bpc;
bpp = pipe_config->pipe_bpp;
@@ -1661,31 +1669,153 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
if (bpc > 0)
bpp = min(bpp, 3*bpc);
+ if (intel_dp_is_edp(intel_dp)) {
+ /* Get the bpp from the VBT only for panels that don't have a bpp in their EDID */
+ if (intel_connector->base.display_info.bpc == 0 &&
+ dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp.bpp);
+ bpp = dev_priv->vbt.edp.bpp;
+ }
+ }
+
+ return bpp;
+}
+
+/* Adjust link config limits based on compliance test requests. */
+static void
+intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits)
+{
/* For DP Compliance we override the computed bpp for the pipe */
if (intel_dp->compliance.test_data.bpc != 0) {
- pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
- pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
- DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
- pipe_config->pipe_bpp);
+ int bpp = 3 * intel_dp->compliance.test_data.bpc;
+
+ limits->min_bpp = limits->max_bpp = bpp;
+ pipe_config->dither_force_disable = bpp == 6 * 3;
+
+ DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
}
- return bpp;
+
+ /* Use values requested by Compliance Test Request */
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ int index;
+
+ /* Validate the compliance test data since max values
+ * might have changed due to link train fallback.
+ */
+ if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
+ intel_dp->compliance.test_lane_count)) {
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (index >= 0)
+ limits->min_clock = limits->max_clock = index;
+ limits->min_lane_count = limits->max_lane_count =
+ intel_dp->compliance.test_lane_count;
+ }
+ }
+}
+
+/* Optimize link config in order: max bpp, min clock, min lanes */
+static bool
+intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
+
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
}
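/*
 * Editor's illustration: a minimal standalone model of the search
 * above, with the i915 helpers reduced to the usual DP arithmetic
 * (mode_rate = crtc_clock * bpp / 8, link_avail with 8b/10b coding
 * overhead = link_clock * lanes * 8 / 10). For a 148500 kHz 1080p
 * mode it prints "bpp 24 rate 162000 x4": the slowest clock and
 * widest link that carry the mode win before any faster rate is tried.
 */
#include <stdio.h>

static void wide_search_demo(void)
{
	static const int rates[] = { 162000, 270000, 540000 }; /* kHz */
	int crtc_clock = 148500, bpp, clock, lanes;

	for (bpp = 24; bpp >= 18; bpp -= 2 * 3) {
		int mode_rate = crtc_clock * bpp / 8;

		for (clock = 0; clock < 3; clock++) {
			for (lanes = 1; lanes <= 4; lanes <<= 1) {
				int avail = rates[clock] * lanes * 8 / 10;

				if (mode_rate <= avail) {
					printf("bpp %d rate %d x%d\n",
					       bpp, rates[clock], lanes);
					return;
				}
			}
		}
	}
}

int main(void)
{
	wide_search_demo();
	return 0;
}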
-static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
- struct drm_display_mode *m2)
+static bool
+intel_dp_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
- bool bres = false;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct link_config_limits limits;
+ int common_len;
+
+ common_len = intel_dp_common_len_rate_limit(intel_dp,
+ intel_dp->max_link_rate);
+
+ /* No common link rates between source and sink */
+ WARN_ON(common_len <= 0);
- if (m1 && m2)
- bres = (m1->hdisplay == m2->hdisplay &&
- m1->hsync_start == m2->hsync_start &&
- m1->hsync_end == m2->hsync_end &&
- m1->htotal == m2->htotal &&
- m1->vdisplay == m2->vdisplay &&
- m1->vsync_start == m2->vsync_start &&
- m1->vsync_end == m2->vsync_end &&
- m1->vtotal == m2->vtotal);
- return bres;
+ limits.min_clock = 0;
+ limits.max_clock = common_len - 1;
+
+ limits.min_lane_count = 1;
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+ limits.min_bpp = 6 * 3;
+ limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ /*
+ * Use the maximum clock and number of lanes the eDP panel
+ * advertises being capable of. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically these values correspond to the
+ * native resolution of the panel.
+ */
+ limits.min_lane_count = limits.max_lane_count;
+ limits.min_clock = limits.max_clock;
+ }
+
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max rate %d max bpp %d pixel clock %iKHz\n",
+ limits.max_lane_count,
+ intel_dp->common_rates[limits.max_clock],
+ limits.max_bpp, adjusted_mode->crtc_clock);
+
+ /*
+ * Optimize for slow and wide. This is the place to add alternative
+ * optimization policy.
+ */
+ if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
+ return false;
+
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+
+ return true;
}
bool
@@ -1701,27 +1831,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- int lane_count, clock;
- int min_lane_count = 1;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
- /* Conveniently, the link BW constants become indices with a shift...*/
- int min_clock = 0;
- int max_clock;
- int bpp, mode_rate;
- int link_avail, link_clock;
- int common_len;
- uint8_t link_bw, rate_select;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
- common_len = intel_dp_common_len_rate_limit(intel_dp,
- intel_dp->max_link_rate);
-
- /* No common link rates between source and sink */
- WARN_ON(common_len <= 0);
-
- max_clock = common_len - 1;
-
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1734,19 +1846,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- struct drm_display_mode *panel_mode =
- intel_connector->panel.alt_fixed_mode;
- struct drm_display_mode *req_mode = &pipe_config->base.mode;
-
- if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
- panel_mode = intel_connector->panel.fixed_mode;
-
- drm_mode_debug_printmodeline(panel_mode);
-
- intel_fixed_panel_mode(panel_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
if (INTEL_GEN(dev_priv) >= 9) {
int ret;
+
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
@@ -1760,82 +1865,19 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return false;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- /* Use values requested by Compliance Test Request */
- if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- int index;
-
- /* Validate the compliance test data since max values
- * might have changed due to link train fallback.
- */
- if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
- intel_dp->compliance.test_lane_count)) {
- index = intel_dp_rate_index(intel_dp->common_rates,
- intel_dp->num_common_rates,
- intel_dp->compliance.test_link_rate);
- if (index >= 0)
- min_clock = max_clock = index;
- min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
- }
- }
- DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max bw %d pixel clock %iKHz\n",
- max_lane_count, intel_dp->common_rates[max_clock],
- adjusted_mode->crtc_clock);
-
- /* Walk through all bpp values. Luckily they're all nicely spaced with 2
- * bpc in between. */
- bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
-
- /* Get bpp from vbt only for panels that dont have bpp in edid */
- if (intel_connector->base.display_info.bpc == 0 &&
- (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
- DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
- bpp = dev_priv->vbt.edp.bpp;
- }
-
- /*
- * Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
- * designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
- */
- min_lane_count = max_lane_count;
- min_clock = max_clock;
- }
-
- for (; bpp >= 6*3; bpp -= 2*3) {
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
-
- for (clock = min_clock; clock <= max_clock; clock++) {
- for (lane_count = min_lane_count;
- lane_count <= max_lane_count;
- lane_count <<= 1) {
-
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- goto found;
- }
- }
- }
- }
-
- return false;
+ if (!intel_dp_compute_link_config(encoder, pipe_config))
+ return false;
-found:
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
@@ -1843,7 +1885,7 @@ found:
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
pipe_config->limited_color_range =
- bpp != 18 &&
+ pipe_config->pipe_bpp != 18 &&
drm_default_rgb_quant_range(adjusted_mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
@@ -1851,21 +1893,7 @@ found:
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- pipe_config->lane_count = lane_count;
-
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = intel_dp->common_rates[clock];
-
- intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
- &link_bw, &rate_select);
-
- DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
- link_bw, rate_select, pipe_config->lane_count,
- pipe_config->port_clock, bpp);
- DRM_DEBUG_KMS("DP link bw required %i available %i\n",
- mode_rate, link_avail);
-
- intel_link_compute_m_n(bpp, lane_count,
+ intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
@@ -1874,11 +1902,12 @@ found:
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
- intel_link_compute_m_n(bpp, lane_count,
- intel_connector->panel.downclock_mode->clock,
- pipe_config->port_clock,
- &pipe_config->dp_m2_n2,
- reduce_m_n);
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ intel_connector->panel.downclock_mode->clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m2_n2,
+ reduce_m_n);
}
if (!HAS_DDI(dev_priv))
@@ -2761,16 +2790,6 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
- /* disable the port before the pipe on g4x */
- intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void vlv_disable_dp(struct intel_encoder *encoder,
@@ -2784,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
+ /*
+ * Bspec does not list a specific disable sequence for g4x DP.
+ * Follow the ilk+ sequence (disable pipe before the port) for
+ * g4x DP as it does not suffer from underruns like the normal
+ * g4x modeset sequence (disable pipe after the port).
+ */
intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
@@ -2881,10 +2906,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- if (IS_CHERRYVIEW(dev_priv))
- *DP &= ~DP_LINK_TRAIN_MASK_CHV;
- else
- *DP &= ~DP_LINK_TRAIN_MASK;
+ *DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
@@ -2897,12 +2919,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- if (IS_CHERRYVIEW(dev_priv)) {
- *DP |= DP_LINK_TRAIN_PAT_3_CHV;
- } else {
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2;
- }
+ DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ *DP |= DP_LINK_TRAIN_PAT_2;
break;
}
}
@@ -3641,10 +3659,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- if (IS_CHERRYVIEW(dev_priv))
- DP &= ~DP_LINK_TRAIN_MASK_CHV;
- else
- DP &= ~DP_LINK_TRAIN_MASK;
+ DP &= ~DP_LINK_TRAIN_MASK;
DP |= DP_LINK_TRAIN_PAT_IDLE;
}
I915_WRITE(intel_dp->output_reg, DP);
@@ -6121,7 +6136,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
- struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
@@ -6176,14 +6190,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- /* prefer fixed mode from EDID if available, save an alt mode also */
+ /* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_connector, fixed_mode);
- } else if (!alt_fixed_mode) {
- alt_fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
}
}
@@ -6220,8 +6233,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe_name(pipe));
}
- intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
- downclock_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
@@ -6327,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_GMCH_DISPLAY(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -6426,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
- } else if (INTEL_GEN(dev_priv) >= 5) {
- intel_encoder->pre_enable = g4x_pre_enable_dp;
- intel_encoder->enable = g4x_enable_dp;
- intel_encoder->disable = ilk_disable_dp;
- intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->disable = g4x_disable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
}
intel_dig_port->dp.output_reg = output_reg;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index f59b59bb0a21..3fcaa98b9055 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -139,6 +139,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
+ if (link_bw)
+ DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+ else
+ DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+
/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
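/*
 * For reference (values from the DP specification, not introduced by
 * this patch): LINK_BW_SET carries the classic bandwidth codes such as
 * 0x06 (1.62 GHz), 0x0a (2.7 GHz) and 0x14 (5.4 GHz), while sinks with
 * an eDP 1.4 rate table are driven through LINK_RATE_SET using an
 * index into the sink's supported-rates table. intel_dp_compute_rate()
 * fills exactly one of the two, which is why the debug message above
 * picks whichever is non-zero.
 */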
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c3de0918ee13..5890500a3a8b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -180,9 +183,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0)
+ if (intel_dp->active_mst_links == 0) {
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_dig_port->base.post_disable(&intel_dig_port->base,
old_crtc_state, NULL);
+ }
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
@@ -223,7 +228,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ if (intel_dp->active_mst_links == 0)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -360,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (!intel_dp)
return MODE_ERROR;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index c8e9e44e5981..00b3ab656b06 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -380,13 +380,14 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* all 1s. Eventually they become accessible as they power up, then
* the reserved bit will give the default 0. Poll on the reserved bit
* becoming 0 to find when the PHY is accessible.
- * HW team confirmed that the time to reach phypowergood status is
- * anywhere between 50 us and 100us.
+ * The flag should get set in 100us according to the HW team, but
+ * use 1ms due to occasional timeouts observed with that.
*/
- if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+ if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ PHY_RESERVED | PHY_POWER_GOOD,
+ PHY_POWER_GOOD,
+ 1))
DRM_ERROR("timeout during PHY%d power on\n", phy);
- }
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
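/*
 * The replacement helper polls a (mask, value) pair: it waits until
 * (I915_READ(BXT_PORT_CL1CM_DW0(phy)) & (PHY_RESERVED | PHY_POWER_GOOD))
 * == PHY_POWER_GOOD, i.e. the reserved bit has dropped to 0 and
 * power-good is set, with the timeout now given in milliseconds (1)
 * rather than the old 100 us budget.
 */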
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 51c5ae4e9116..383fbc15113d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -118,10 +118,10 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
- cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
+ cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
- pll->name, onoff(state), onoff(cur_state));
+ pll->info->name, onoff(state), onoff(cur_state));
}
/**
@@ -143,11 +143,11 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpll_lock);
WARN_ON(!pll->state.crtc_mask);
if (!pll->active_mask) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- pll->funcs.prepare(dev_priv, pll);
+ pll->info->funcs->prepare(dev_priv, pll);
}
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -179,7 +179,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
pll->active_mask |= crtc_mask;
DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
- pll->name, pll->active_mask, pll->on,
+ pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id);
if (old_mask) {
@@ -189,8 +189,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
- DRM_DEBUG_KMS("enabling %s\n", pll->name);
- pll->funcs.enable(dev_priv, pll);
+ DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+ pll->info->funcs->enable(dev_priv, pll);
pll->on = true;
out:
@@ -221,7 +221,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
goto out;
DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
- pll->name, pll->active_mask, pll->on,
+ pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id);
assert_shared_dpll_enabled(dev_priv, pll);
@@ -231,8 +231,8 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (pll->active_mask)
goto out;
- DRM_DEBUG_KMS("disabling %s\n", pll->name);
- pll->funcs.disable(dev_priv, pll);
+ DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+ pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
out:
@@ -263,7 +263,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
&shared_dpll[i].hw_state,
sizeof(crtc_state->dpll_hw_state)) == 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, crtc->base.name, pll->name,
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name,
shared_dpll[i].crtc_mask,
pll->active_mask);
return pll;
@@ -275,7 +276,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
if (shared_dpll[i].crtc_mask == 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->name);
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
return pll;
}
}
@@ -289,19 +291,19 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
{
struct intel_shared_dpll_state *shared_dpll;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- enum intel_dpll_id i = pll->id;
+ const enum intel_dpll_id id = pll->info->id;
shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
- if (shared_dpll[i].crtc_mask == 0)
- shared_dpll[i].hw_state =
+ if (shared_dpll[id].crtc_mask == 0)
+ shared_dpll[id].hw_state =
crtc_state->dpll_hw_state;
crtc_state->shared_dpll = pll;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+ DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
- shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
+ shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
/**
@@ -341,15 +343,16 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
- val = I915_READ(PCH_DPLL(pll->id));
+ val = I915_READ(PCH_DPLL(id));
hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
- hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+ hw_state->fp0 = I915_READ(PCH_FP0(id));
+ hw_state->fp1 = I915_READ(PCH_FP1(id));
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
@@ -359,8 +362,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
+ const enum intel_dpll_id id = pll->info->id;
+
+ I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
+ I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -379,13 +384,15 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
+
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
+ I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
+ POSTING_READ(PCH_DPLL(id));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -393,14 +400,15 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
- POSTING_READ(PCH_DPLL(pll->id));
+ I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+ POSTING_READ(PCH_DPLL(id));
udelay(200);
}
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
@@ -410,8 +418,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
- I915_WRITE(PCH_DPLL(pll->id), 0);
- POSTING_READ(PCH_DPLL(pll->id));
+ I915_WRITE(PCH_DPLL(id), 0);
+ POSTING_READ(PCH_DPLL(id));
udelay(200);
}
@@ -429,7 +437,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->name);
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
} else {
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_PCH_PLL_A,
@@ -466,8 +475,10 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(pll->id));
+ const enum intel_dpll_id id = pll->info->id;
+
+ I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(id));
udelay(20);
}
@@ -482,11 +493,12 @@ static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
- val = I915_READ(WRPLL_CTL(pll->id));
- I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(pll->id));
+ val = I915_READ(WRPLL_CTL(id));
+ I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(id));
}
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
@@ -503,12 +515,13 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
- val = I915_READ(WRPLL_CTL(pll->id));
+ val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
@@ -914,13 +927,15 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
val = I915_READ(DPLL_CTRL1);
- val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
- DPLL_CTRL1_LINK_RATE_MASK(pll->id));
- val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
+ val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
+ DPLL_CTRL1_SSC(id) |
+ DPLL_CTRL1_LINK_RATE_MASK(id));
+ val |= pll->state.hw_state.ctrl1 << (id * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -930,24 +945,25 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
- I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
- POSTING_READ(regs[pll->id].cfgcr1);
- POSTING_READ(regs[pll->id].cfgcr2);
+ I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+ I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ POSTING_READ(regs[id].cfgcr1);
+ POSTING_READ(regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+ I915_WRITE(regs[id].ctl,
+ I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
if (intel_wait_for_register(dev_priv,
DPLL_STATUS,
- DPLL_LOCK(pll->id),
- DPLL_LOCK(pll->id),
+ DPLL_LOCK(id),
+ DPLL_LOCK(id),
5))
- DRM_ERROR("DPLL %d not locked\n", pll->id);
+ DRM_ERROR("DPLL %d not locked\n", id);
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -960,11 +976,12 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[pll->id].ctl);
+ I915_WRITE(regs[id].ctl,
+ I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[id].ctl);
}
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -978,6 +995,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
@@ -985,17 +1003,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(regs[pll->id].ctl);
+ val = I915_READ(regs[id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
goto out;
val = I915_READ(DPLL_CTRL1);
- hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+ hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
- if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
- hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ if (val & DPLL_CTRL1_HDMI_MODE(id)) {
+ hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
}
ret = true;
@@ -1011,6 +1029,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
@@ -1019,12 +1038,12 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
/* DPLL0 is always enabled since it drives CDCLK */
- val = I915_READ(regs[pll->id].ctl);
+ val = I915_READ(regs[id].ctl);
if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
goto out;
val = I915_READ(DPLL_CTRL1);
- hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+ hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
ret = true;
@@ -1424,7 +1443,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t temp;
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
@@ -1543,7 +1562,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
uint32_t temp;
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1566,7 +1585,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
enum dpio_phy phy;
@@ -1824,7 +1843,7 @@ bxt_get_dpll(struct intel_crtc *crtc,
pll = intel_get_shared_dpll_by_id(dev_priv, i);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->name);
+ crtc->base.base.id, crtc->base.name, pll->info->name);
intel_reference_shared_dpll(pll, crtc_state);
@@ -1877,13 +1896,6 @@ static void intel_ddi_pll_init(struct drm_device *dev)
}
}
-struct dpll_info {
- const char *name;
- const int id;
- const struct intel_shared_dpll_funcs *funcs;
- uint32_t flags;
-};
-
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
@@ -1896,9 +1908,9 @@ struct intel_dpll_mgr {
};
static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
- { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
- { NULL, -1, NULL, 0 },
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
};
static const struct intel_dpll_mgr pch_pll_mgr = {
@@ -1908,13 +1920,13 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
- { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
- { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
- { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
- { NULL, -1, NULL, },
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
};
static const struct intel_dpll_mgr hsw_pll_mgr = {
@@ -1924,11 +1936,11 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
};
static const struct dpll_info skl_plls[] = {
- { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
- { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
- { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
- { NULL, -1, NULL, },
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
};
static const struct intel_dpll_mgr skl_pll_mgr = {
@@ -1938,10 +1950,10 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static const struct dpll_info bxt_plls[] = {
- { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
- { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
- { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
- { NULL, -1, NULL, },
+ { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+ { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { },
};
static const struct intel_dpll_mgr bxt_pll_mgr = {
@@ -1953,38 +1965,39 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+ val = I915_READ(CNL_DPLL_ENABLE(id));
val |= PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+ I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
- CNL_DPLL_ENABLE(pll->id),
+ CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
PLL_POWER_STATE,
5))
- DRM_ERROR("PLL %d Power not enabled\n", pll->id);
+ DRM_ERROR("PLL %d Power not enabled\n", id);
/*
* 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
* select DP mode, and set DP link rate.
*/
val = pll->state.hw_state.cfgcr0;
- I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);
+ I915_WRITE(CNL_DPLL_CFGCR0(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR0(pll->id));
+ POSTING_READ(CNL_DPLL_CFGCR0(id));
/* 3. Configure DPLL_CFGCR1 */
/* Avoid touching CFGCR1 if HDMI mode is not enabled */
if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
- I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
+ I915_WRITE(CNL_DPLL_CFGCR1(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
+ POSTING_READ(CNL_DPLL_CFGCR1(id));
}
/*
@@ -1997,17 +2010,17 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+ val = I915_READ(CNL_DPLL_ENABLE(id));
val |= PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+ I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
- CNL_DPLL_ENABLE(pll->id),
+ CNL_DPLL_ENABLE(id),
PLL_LOCK,
PLL_LOCK,
5))
- DRM_ERROR("PLL %d not locked\n", pll->id);
+ DRM_ERROR("PLL %d not locked\n", id);
/*
* 8. If the frequency will result in a change to the voltage
@@ -2027,6 +2040,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
/*
@@ -2044,17 +2058,17 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+ val = I915_READ(CNL_DPLL_ENABLE(id));
val &= ~PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+ I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
- CNL_DPLL_ENABLE(pll->id),
+ CNL_DPLL_ENABLE(id),
PLL_LOCK,
0,
5))
- DRM_ERROR("PLL %d locked\n", pll->id);
+ DRM_ERROR("PLL %d locked\n", id);
/*
* 5. If the frequency will result in a change to the voltage
@@ -2066,23 +2080,24 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+ val = I915_READ(CNL_DPLL_ENABLE(id));
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+ I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
- CNL_DPLL_ENABLE(pll->id),
+ CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
0,
5))
- DRM_ERROR("PLL %d Power not disabled\n", pll->id);
+ DRM_ERROR("PLL %d Power not disabled\n", id);
}
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
+ const enum intel_dpll_id id = pll->info->id;
uint32_t val;
bool ret;
@@ -2091,16 +2106,16 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+ val = I915_READ(CNL_DPLL_ENABLE(id));
if (!(val & PLL_ENABLE))
goto out;
- val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
+ val = I915_READ(CNL_DPLL_CFGCR0(id));
hw_state->cfgcr0 = val;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CFGCR0_HDMI_MODE) {
- hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
+ hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
}
ret = true;
@@ -2203,6 +2218,7 @@ cnl_ddi_calculate_wrpll(int clock,
struct skl_wrpll_params *wrpll_params)
{
u32 afe_clock = clock * 5;
+ uint32_t ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2235,8 +2251,17 @@ cnl_ddi_calculate_wrpll(int clock,
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
- cnl_wrpll_params_populate(wrpll_params, best_dco,
- dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);
+ ref_clock = dev_priv->cdclk.hw.ref;
+
+ /*
+ * For ICL, the spec states: if reference frequency is 38.4, use 19.2
+ * because the DPLL automatically divides that by 2.
+ */
+ if (IS_ICELAKE(dev_priv) && ref_clock == 38400)
+ ref_clock = 19200;
+
+ cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
+ kdiv);
return true;
}
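/*
 * Numeric sanity check (editor's illustration): on ICL the hardware
 * feeds the DPLL 38.4 / 2 = 19.2 MHz, so the DCO multipliers must be
 * computed against 19200 kHz to describe what the PLL actually sees.
 * Had the code kept ref_clock = 38400, the programmed multiplier would
 * come out half as large and the resulting DCO would run at half the
 * intended frequency.
 */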
@@ -2372,10 +2397,10 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
};
static const struct dpll_info cnl_plls[] = {
- { "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
- { "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
- { "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
- { NULL, -1, NULL, },
+ { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+ { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { },
};
static const struct intel_dpll_mgr cnl_pll_mgr = {
@@ -2384,6 +2409,644 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dump_hw_state = cnl_dump_hw_state,
};
+/*
+ * These values are already adjusted: they're the bits we write to the
+ * registers, not the logical values.
+ */
+static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+};
+
+/* Also used for 38.4 MHz values. */
+static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+};
+
+static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+ struct skl_wrpll_params *pll_params)
+{
+ const struct skl_wrpll_params *params;
+
+ params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+
+ switch (clock) {
+ case 540000:
+ *pll_params = params[0];
+ break;
+ case 270000:
+ *pll_params = params[1];
+ break;
+ case 162000:
+ *pll_params = params[2];
+ break;
+ case 324000:
+ *pll_params = params[3];
+ break;
+ case 216000:
+ *pll_params = params[4];
+ break;
+ case 432000:
+ *pll_params = params[5];
+ break;
+ case 648000:
+ *pll_params = params[6];
+ break;
+ case 810000:
+ *pll_params = params[7];
+ break;
+ default:
+ MISSING_CASE(clock);
+ return false;
+ }
+
+ return true;
+}
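/*
 * Sanity check on the 24 MHz table (editor's arithmetic): entry [0]
 * encodes dco = 24 MHz * (0x151 + 0x4000 / 0x8000) = 24 * 337.5 =
 * 8100 MHz. Divided by pdiv 3 (register value 0x2) and kdiv 1 this
 * yields a 2700 MHz AFE clock, exactly port_clock 540000 kHz * 5 for
 * the 5.4 case above; entry [1] reuses the same DCO with kdiv 2 to
 * halve it for 2.7.
 */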
+
+static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder, int clock,
+ struct intel_dpll_hw_state *pll_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ uint32_t cfgcr0, cfgcr1;
+ struct skl_wrpll_params pll_params = { 0 };
+ bool ret;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+ else
+ ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
+
+ if (!ret)
+ return false;
+
+ cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
+ pll_params.dco_integer;
+
+ cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
+ DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
+ DPLL_CFGCR1_KDIV(pll_params.kdiv) |
+ DPLL_CFGCR1_PDIV(pll_params.pdiv) |
+ DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+ pll_state->cfgcr0 = cfgcr0;
+ pll_state->cfgcr1 = cfgcr1;
+ return true;
+}
+
+static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
+{
+ return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
+}
+
+static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+{
+ return port - PORT_C + DPLL_ID_ICL_MGPLL1;
+}
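/*
 * The two helpers are inverses over the MG range, e.g.
 * icl_mg_pll_id_to_port(DPLL_ID_ICL_MGPLL1) == PORT_C and
 * icl_port_to_mg_pll_id(PORT_F) == DPLL_ID_ICL_MGPLL4; only ports
 * C..F and ids MGPLL1..4 are valid inputs.
 */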
+
+static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+ uint32_t *target_dco_khz,
+ struct intel_dpll_hw_state *state)
+{
+ uint32_t dco_min_freq, dco_max_freq;
+ int div1_vals[] = {7, 5, 3, 2};
+ unsigned int i;
+ int div2;
+
+ dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
+ dco_max_freq = is_dp ? 8100000 : 10000000;
+
+ for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
+ int div1 = div1_vals[i];
+
+ for (div2 = 10; div2 > 0; div2--) {
+ int dco = div1 * div2 * clock_khz * 5;
+ int a_divratio, tlinedrv, inputsel, hsdiv;
+
+ if (dco < dco_min_freq || dco > dco_max_freq)
+ continue;
+
+ if (div2 >= 2) {
+ a_divratio = is_dp ? 10 : 5;
+ tlinedrv = 2;
+ } else {
+ a_divratio = 5;
+ tlinedrv = 0;
+ }
+ inputsel = is_dp ? 0 : 1;
+
+ switch (div1) {
+ default:
+ MISSING_CASE(div1);
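+ /* fall through */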
+ case 2:
+ hsdiv = 0;
+ break;
+ case 3:
+ hsdiv = 1;
+ break;
+ case 5:
+ hsdiv = 2;
+ break;
+ case 7:
+ hsdiv = 3;
+ break;
+ }
+
+ *target_dco_khz = dco;
+
+ state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+
+ state->mg_clktop2_coreclkctl1 =
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
+
+ state->mg_clktop2_hsclkctl =
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
+
+ return true;
+ }
+ }
+
+ return false;
+}
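/*
 * Worked trace (editor's illustration): for DP at 270000 kHz the DCO
 * window collapses to exactly 8100000 kHz and dco = div1 * div2 *
 * 270000 * 5, so div1 * div2 must equal 6. div1 = 7 and div1 = 5 can
 * never reach the target with an integer div2; the first hit is
 * div1 = 3, div2 = 2, which selects hsdiv = 1, a_divratio = 10
 * (DP with div2 >= 2) and tlinedrv = 2.
 */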
+
+/*
+ * The specification for this function uses real numbers, so the math had to be
+ * adapted to integer-only calculation; that is why it looks so different.
+ */
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder, int clock,
+ struct intel_dpll_hw_state *pll_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int refclk_khz = dev_priv->cdclk.hw.ref;
+ uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+ uint32_t iref_ndiv, iref_trim, iref_pulse_w;
+ uint32_t prop_coeff, int_coeff;
+ uint32_t tdc_targetcnt, feedfwgain;
+ uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
+ uint64_t tmp;
+ bool use_ssc = false;
+ bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+
+ if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+ pll_state)) {
+ DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+ return false;
+ }
+
+ m1div = 2;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ if (m2div_int > 255) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ if (m2div_int > 255) {
+ DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
+ clock);
+ return false;
+ }
+ }
+ m2div_rem = dco_khz % (refclk_khz * m1div);
+
+ tmp = (uint64_t)m2div_rem * (1 << 22);
+ do_div(tmp, refclk_khz * m1div);
+ m2div_frac = tmp;
+
+ switch (refclk_khz) {
+ case 19200:
+ iref_ndiv = 1;
+ iref_trim = 28;
+ iref_pulse_w = 1;
+ break;
+ case 24000:
+ iref_ndiv = 1;
+ iref_trim = 25;
+ iref_pulse_w = 2;
+ break;
+ case 38400:
+ iref_ndiv = 2;
+ iref_trim = 28;
+ iref_pulse_w = 1;
+ break;
+ default:
+ MISSING_CASE(refclk_khz);
+ return false;
+ }
+
+ /*
+ * tdc_res = 0.000003
+ * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
+ *
+ * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
+ * was supposed to be a division, but we rearranged the operations of
+ * the formula to avoid early divisions so we don't multiply the
+ * rounding errors.
+ *
+ * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
+ * we also rearrange to work with integers.
+ *
+ * The 0.5 transformed to 5 results in a multiplication by 10 and the
+ * last division by 10.
+ */
+ tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
+
+ /*
+ * Here we divide dco_khz by 10 in order to allow the dividend to fit in
+ * 32 bits. That's not a problem since we round the division down
+ * anyway.
+ */
+ feedfwgain = (use_ssc || m2div_rem > 0) ?
+ m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
+
+ if (dco_khz >= 9000000) {
+ prop_coeff = 5;
+ int_coeff = 10;
+ } else {
+ prop_coeff = 4;
+ int_coeff = 8;
+ }
+
+ if (use_ssc) {
+ tmp = (uint64_t)dco_khz * 47 * 32;
+ do_div(tmp, refclk_khz * m1div * 10000);
+ ssc_stepsize = tmp;
+
+ tmp = (uint64_t)dco_khz * 1000;
+ ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
+ } else {
+ ssc_stepsize = 0;
+ ssc_steplen = 0;
+ }
+ ssc_steplog = 4;
+
+ pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
+
+ if (refclk_khz != 38400) {
+ pll_state->mg_pll_tdc_coldst_bias |=
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+ }
+
+ return true;
+}
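/*
 * Worked numbers (editor's illustration, continuing the 270000 kHz DP
 * example with a 24 MHz reference): dco_khz = 8100000 and m1div = 2,
 * so m2div_int = 8100000 / 48000 = 168 with remainder 36000, and
 * m2div_frac = 36000 * 2^22 / 48000 = 3145728, i.e. 0.75 in 22-bit
 * fixed point. The integer tdc formula gives (2 * 1000 * 100000 * 10 /
 * (132 * 24000) + 5) / 10 = (631 + 5) / 10 = 63, matching the
 * real-number version int(2 / (0.000003 * 8 * 50 * 1.1) / 24 + 0.5).
 */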
+
+static struct intel_shared_dpll *
+icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state pll_state = {};
+ enum port port = encoder->port;
+ enum intel_dpll_id min, max;
+ int clock = crtc_state->port_clock;
+ bool ret;
+
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+ min = DPLL_ID_ICL_DPLL0;
+ max = DPLL_ID_ICL_DPLL1;
+ ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+ &pll_state);
+ break;
+ case PORT_C:
+ case PORT_D:
+ case PORT_E:
+ case PORT_F:
+ min = icl_port_to_mg_pll_id(port);
+ max = min;
+ ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+ &pll_state);
+ break;
+ default:
+ MISSING_CASE(port);
+ return NULL;
+ }
+
+ if (!ret) {
+ DRM_DEBUG_KMS("Could not calculate PLL state.\n");
+ return NULL;
+ }
+
+ crtc_state->dpll_hw_state = pll_state;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+ if (!pll) {
+ DRM_DEBUG_KMS("No PLL selected\n");
+ return NULL;
+ }
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+{
+ switch (id) {
+ default:
+ MISSING_CASE(id);
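+ /* fall through */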
+ case DPLL_ID_ICL_DPLL0:
+ case DPLL_ID_ICL_DPLL1:
+ return CNL_DPLL_ENABLE(id);
+ case DPLL_ID_ICL_MGPLL1:
+ case DPLL_ID_ICL_MGPLL2:
+ case DPLL_ID_ICL_MGPLL3:
+ case DPLL_ID_ICL_MGPLL4:
+ return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
+ }
+}
+
+static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ uint32_t val;
+ enum port port;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(icl_pll_id_to_enable_reg(id));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ switch (id) {
+ case DPLL_ID_ICL_DPLL0:
+ case DPLL_ID_ICL_DPLL1:
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ break;
+ case DPLL_ID_ICL_MGPLL1:
+ case DPLL_ID_ICL_MGPLL2:
+ case DPLL_ID_ICL_MGPLL3:
+ case DPLL_ID_ICL_MGPLL4:
+ port = icl_mg_pll_id_to_port(id);
+ hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
+ hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
+ hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
+ hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
+ hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+ hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ break;
+ default:
+ MISSING_CASE(id);
+ }
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ return ret;
+}
+
+static void icl_dpll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ const enum intel_dpll_id id = pll->info->id;
+
+ I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
+ I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
+ POSTING_READ(ICL_DPLL_CFGCR1(id));
+}
+
+static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum port port = icl_mg_pll_id_to_port(pll->info->id);
+
+ I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
+ I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
+ hw_state->mg_clktop2_coreclkctl1);
+ I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
+ I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
+ I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
+ I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
+ I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
+ I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
+ I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
+ I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
+ hw_state->mg_pll_tdc_coldst_bias);
+ POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
+}
+
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
+ uint32_t val;
+
+ val = I915_READ(enable_reg);
+ val |= PLL_POWER_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /*
+ * The spec says we need to "wait" but it also says it should be
+ * immediate.
+ */
+ if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
+ PLL_POWER_STATE, 1))
+ DRM_ERROR("PLL %d Power not enabled\n", id);
+
+ switch (id) {
+ case DPLL_ID_ICL_DPLL0:
+ case DPLL_ID_ICL_DPLL1:
+ icl_dpll_write(dev_priv, pll);
+ break;
+ case DPLL_ID_ICL_MGPLL1:
+ case DPLL_ID_ICL_MGPLL2:
+ case DPLL_ID_ICL_MGPLL3:
+ case DPLL_ID_ICL_MGPLL4:
+ icl_mg_pll_write(dev_priv, pll);
+ break;
+ default:
+ MISSING_CASE(id);
+ }
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ val = I915_READ(enable_reg);
+ val |= PLL_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
+ 1)) /* 600us actually. */
+ DRM_ERROR("PLL %d not locked\n", id);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void icl_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
+ uint32_t val;
+
+ /* The first steps are done by intel_ddi_post_disable(). */
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ val = I915_READ(enable_reg);
+ val &= ~PLL_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /* Timeout is actually 1us. */
+ if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
+ DRM_ERROR("PLL %d locked\n", id);
+
+ /* DVFS post sequence would be here. See the comment above. */
+
+ val = I915_READ(enable_reg);
+ val &= ~PLL_POWER_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /*
+ * The spec says we need to "wait" but it also says it should be
+ * immediate.
+ */
+ if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
+ 1))
+ DRM_ERROR("PLL %d Power not disabled\n", id);
+}
+
+static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static const struct intel_shared_dpll_funcs icl_pll_funcs = {
+ .enable = icl_pll_enable,
+ .disable = icl_pll_disable,
+ .get_hw_state = icl_pll_get_hw_state,
+};
+
+static const struct dpll_info icl_plls[] = {
+ { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr icl_pll_mgr = {
+ .dpll_info = icl_plls,
+ .get_dpll = icl_get_dpll,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -2397,7 +3060,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ dpll_mgr = &icl_pll_mgr;
+ else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
else if (IS_GEN9_BC(dev_priv))
dpll_mgr = &skl_pll_mgr;
@@ -2415,13 +3080,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_info = dpll_mgr->dpll_info;
- for (i = 0; dpll_info[i].id >= 0; i++) {
+ for (i = 0; dpll_info[i].name; i++) {
WARN_ON(i != dpll_info[i].id);
-
- dev_priv->shared_dplls[i].id = dpll_info[i].id;
- dev_priv->shared_dplls[i].name = dpll_info[i].name;
- dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
- dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
+ dev_priv->shared_dplls[i].info = &dpll_info[i];
}
dev_priv->dpll_mgr = dpll_mgr;
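The loop change above switches the table terminator from a negative id to a NULL name; a tiny standalone sketch of that sentinel-terminated walk (types and entries are made up):

    #include <stdio.h>

    struct pll_desc {
        const char *name;
        int id;
    };

    static const struct pll_desc plls[] = {
        { "DPLL 0",   0 },
        { "DPLL 1",   1 },
        { "MG PLL 1", 2 },
        { },          /* sentinel: NULL name terminates the walk */
    };

    int main(void)
    {
        for (int i = 0; plls[i].name; i++)
            printf("shared_dplls[%d] = %s\n", plls[i].id, plls[i].name);
        return 0;
    }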
@@ -2481,7 +3142,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_shared_dpll_state *shared_dpll_state;
shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
- shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
+ shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f24ccf443d25..7a0cd564a9ee 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -103,6 +103,32 @@ enum intel_dpll_id {
* @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
*/
DPLL_ID_SKL_DPLL3 = 3,
+
+ /**
+ * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+ */
+ DPLL_ID_ICL_DPLL0 = 0,
+ /**
+ * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+ */
+ DPLL_ID_ICL_DPLL1 = 1,
+ /**
+ * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+ */
+ DPLL_ID_ICL_MGPLL1 = 2,
+ /**
+ * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 2 port 2 (D)
+ */
+ DPLL_ID_ICL_MGPLL2 = 3,
+ /**
+ * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 3 port 3 (E)
+ */
+ DPLL_ID_ICL_MGPLL3 = 4,
+ /**
+ * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 4 port 4 (F)
+ */
+ DPLL_ID_ICL_MGPLL4 = 5,
};
#define I915_NUM_PLLS 6
@@ -135,6 +161,21 @@ struct intel_dpll_hw_state {
/* bxt */
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
pcsdw12;
+
+ /*
+ * ICL uses the following, already defined:
+ * uint32_t cfgcr0, cfgcr1;
+ */
+ uint32_t mg_refclkin_ctl;
+ uint32_t mg_clktop2_coreclkctl1;
+ uint32_t mg_clktop2_hsclkctl;
+ uint32_t mg_pll_div0;
+ uint32_t mg_pll_div1;
+ uint32_t mg_pll_lf;
+ uint32_t mg_pll_frac_lock;
+ uint32_t mg_pll_ssc;
+ uint32_t mg_pll_bias;
+ uint32_t mg_pll_tdc_coldst_bias;
};
/**
@@ -206,6 +247,37 @@ struct intel_shared_dpll_funcs {
};
/**
+ * struct dpll_info - display PLL platform specific info
+ */
+struct dpll_info {
+ /**
+ * @name: DPLL name; used for logging
+ */
+ const char *name;
+
+ /**
+ * @funcs: platform specific hooks
+ */
+ const struct intel_shared_dpll_funcs *funcs;
+
+ /**
+ * @id: unique identifier for this DPLL; should match the index in the
+ * dev_priv->shared_dplls array
+ */
+ enum intel_dpll_id id;
+
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+ /**
+ * @flags:
+ *
+ * INTEL_DPLL_ALWAYS_ON
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
+ */
+ uint32_t flags;
+};
+
+/**
* struct intel_shared_dpll - display PLL with tracked state and users
*/
struct intel_shared_dpll {
@@ -228,30 +300,9 @@ struct intel_shared_dpll {
bool on;
/**
- * @name: DPLL name; used for logging
+ * @info: platform specific info
*/
- const char *name;
-
- /**
- * @id: unique indentifier for this DPLL; should match the index in the
- * dev_priv->shared_dplls array
- */
- enum intel_dpll_id id;
-
- /**
- * @funcs: platform specific hooks
- */
- struct intel_shared_dpll_funcs funcs;
-
-#define INTEL_DPLL_ALWAYS_ON (1 << 0)
- /**
- * @flags:
- *
- * INTEL_DPLL_ALWAYS_ON
- * Inform the state checker that the DPLL is kept enabled even if
- * not in use by any CRTC.
- */
- uint32_t flags;
+ const struct dpll_info *info;
};
#define SKL_DPLL0 0
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a80fbad9be0f..0361130500a6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -56,6 +56,8 @@
for (;;) { \
const bool expired__ = ktime_after(ktime_get_raw(), end__); \
OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
if (COND) { \
ret__ = 0; \
break; \
@@ -96,6 +98,8 @@
u64 now = local_clock(); \
if (!(ATOMIC)) \
preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
if (COND) { \
ret = 0; \
break; \
@@ -140,6 +144,10 @@
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
/*
* Display related stuff
*/
@@ -269,7 +277,6 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
- struct drm_display_mode *alt_fixed_mode;
struct drm_display_mode *downclock_mode;
/* backlight */
@@ -482,7 +489,7 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
/* Gen9+ only */
- struct skl_wm_values wm_results;
+ struct skl_ddb_values wm_results;
struct i915_sw_fence commit_ready;
@@ -548,6 +555,12 @@ struct intel_initial_plane_config {
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
struct intel_scaler {
int in_use;
@@ -598,7 +611,9 @@ struct intel_pipe_wm {
struct skl_plane_wm {
struct skl_wm_level wm[8];
+ struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
+ bool is_planar;
};
struct skl_pipe_wm {
@@ -874,6 +889,7 @@ struct intel_crtc_state {
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
+ u8 nv12_planes;
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -1321,10 +1337,14 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1389,6 +1409,12 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
+void icl_map_plls_to_ports(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state);
+void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int plane, unsigned int height);
@@ -1571,8 +1597,6 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1588,9 +1612,12 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
+int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ uint32_t pixel_format);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
@@ -1607,6 +1634,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1773,6 +1801,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
@@ -1783,7 +1812,7 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
struct drm_connector *connector,
bool high_tmds_clock_ratio,
bool scrambling);
@@ -1820,7 +1849,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
- struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
@@ -1877,7 +1905,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
@@ -1886,6 +1915,8 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1909,6 +1940,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
@@ -2046,6 +2079,9 @@ void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
bool skl_plane_get_hw_state(struct intel_plane *plane);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
+bool intel_format_is_yuv(uint32_t format);
+bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2082,31 +2118,6 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
-static inline struct intel_crtc_state *
-intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base);
-
- if (crtc_state)
- return to_intel_crtc_state(crtc_state);
- else
- return NULL;
-}
-
-static inline struct intel_plane_state *
-intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
- struct intel_plane *plane)
-{
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);
-
- return to_intel_plane_state(plane_state);
-}
-
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
@@ -2138,8 +2149,17 @@ int intel_pipe_crc_create(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
#define intel_crtc_set_crc_source NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
#endif
extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 51a1d6868b1e..f349b3920199 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_KMS("\n");
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -1846,7 +1852,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL);
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 91c07b0c8db9..4d6ffa7b3e7b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -647,6 +647,11 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* prepare count */
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
+ if (prepare_cnt > PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ prepare_cnt = PREPARE_CNT_MAX;
+ }
+
/* exit zero count */
exit_zero_cnt = DIV_ROUND_UP(
(ths_prepare_hszero - ths_prepare_ns) * ui_den,
@@ -662,32 +667,29 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
exit_zero_cnt += 1;
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+ }
+
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
(tclk_prepare_clkzero - ths_prepare_ns)
* ui_den, ui_num * mul);
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+ }
+
/* trail count */
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
- if (prepare_cnt > PREPARE_CNT_MAX ||
- exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
- clk_zero_cnt > CLK_ZERO_CNT_MAX ||
- trail_cnt > TRAIL_CNT_MAX)
- DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
-
- if (prepare_cnt > PREPARE_CNT_MAX)
- prepare_cnt = PREPARE_CNT_MAX;
-
- if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
- exit_zero_cnt = EXIT_ZERO_CNT_MAX;
-
- if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
- clk_zero_cnt = CLK_ZERO_CNT_MAX;
-
- if (trail_cnt > TRAIL_CNT_MAX)
+ if (trail_cnt > TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
+ }
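Each D-PHY count is now clamped and logged right where it is computed; a standalone sketch of that clamp-and-log shape (names and the limit are illustrative):

    #include <stdio.h>

    static unsigned int clamp_count(const char *what, unsigned int val,
                                    unsigned int max)
    {
        if (val > max) {
            fprintf(stderr, "%s too high %u, clamping to %u\n", what, val, max);
            val = max;
        }
        return val;
    }

    int main(void)
    {
        /* 0x3f stands in for a PREPARE_CNT_MAX-style field limit. */
        unsigned int prepare_cnt = clamp_count("prepare count", 70, 0x3f);

        printf("prepare_cnt = %u\n", prepare_cnt);
        return 0;
    }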
/* B080 */
intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb0c559b2715..61d908e0df0e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
/* XXX: Validate clock range */
if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
@@ -536,7 +542,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
*/
intel_panel_init(&intel_connector->panel,
intel_dvo_get_current_mode(intel_encoder),
- NULL, NULL);
+ NULL);
intel_dvo->panel_wants_dither = true;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index f7c25828d3bb..1590375f31cb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -81,13 +81,17 @@ static const struct engine_class_info intel_engine_classes[] = {
},
};
+#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
unsigned int uabi_id;
u8 class;
u8 instance;
- u32 mmio_base;
- unsigned irq_shift;
+ /* mmio bases table *must* be sorted in reverse gen order */
+ struct engine_mmio_base {
+ u32 gen : 8;
+ u32 base : 24;
+ } mmio_bases[MAX_MMIO_BASES];
};
static const struct engine_info intel_engines[] = {
@@ -96,64 +100,76 @@ static const struct engine_info intel_engines[] = {
.uabi_id = I915_EXEC_RENDER,
.class = RENDER_CLASS,
.instance = 0,
- .mmio_base = RENDER_RING_BASE,
- .irq_shift = GEN8_RCS_IRQ_SHIFT,
+ .mmio_bases = {
+ { .gen = 1, .base = RENDER_RING_BASE }
+ },
},
[BCS] = {
.hw_id = BCS_HW,
.uabi_id = I915_EXEC_BLT,
.class = COPY_ENGINE_CLASS,
.instance = 0,
- .mmio_base = BLT_RING_BASE,
- .irq_shift = GEN8_BCS_IRQ_SHIFT,
+ .mmio_bases = {
+ { .gen = 6, .base = BLT_RING_BASE }
+ },
},
[VCS] = {
.hw_id = VCS_HW,
.uabi_id = I915_EXEC_BSD,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
- .mmio_base = GEN6_BSD_RING_BASE,
- .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_BSD_RING_BASE },
+ { .gen = 6, .base = GEN6_BSD_RING_BASE },
+ { .gen = 4, .base = BSD_RING_BASE }
+ },
},
[VCS2] = {
.hw_id = VCS2_HW,
.uabi_id = I915_EXEC_BSD,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
- .mmio_base = GEN8_BSD2_RING_BASE,
- .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_BSD2_RING_BASE },
+ { .gen = 8, .base = GEN8_BSD2_RING_BASE }
+ },
},
[VCS3] = {
.hw_id = VCS3_HW,
.uabi_id = I915_EXEC_BSD,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
- .mmio_base = GEN11_BSD3_RING_BASE,
- .irq_shift = 0, /* not used */
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_BSD3_RING_BASE }
+ },
},
[VCS4] = {
.hw_id = VCS4_HW,
.uabi_id = I915_EXEC_BSD,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
- .mmio_base = GEN11_BSD4_RING_BASE,
- .irq_shift = 0, /* not used */
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_BSD4_RING_BASE }
+ },
},
[VECS] = {
.hw_id = VECS_HW,
.uabi_id = I915_EXEC_VEBOX,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
- .mmio_base = VEBOX_RING_BASE,
- .irq_shift = GEN8_VECS_IRQ_SHIFT,
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_VEBOX_RING_BASE },
+ { .gen = 7, .base = VEBOX_RING_BASE }
+ },
},
[VECS2] = {
.hw_id = VECS2_HW,
.uabi_id = I915_EXEC_VEBOX,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
- .mmio_base = GEN11_VEBOX2_RING_BASE,
- .irq_shift = 0, /* not used */
+ .mmio_bases = {
+ { .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
+ },
},
};
@@ -223,16 +239,36 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
}
}
+static u32 __engine_mmio_base(struct drm_i915_private *i915,
+ const struct engine_mmio_base *bases)
+{
+ int i;
+
+ for (i = 0; i < MAX_MMIO_BASES; i++)
+ if (INTEL_GEN(i915) >= bases[i].gen)
+ break;
+
+ GEM_BUG_ON(i == MAX_MMIO_BASES);
+ GEM_BUG_ON(!bases[i].base);
+
+ return bases[i].base;
+}
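A standalone sketch of the reverse-sorted lookup __engine_mmio_base() performs: the first entry whose gen is at or below the running gen wins. The base values below are illustrative stand-ins for the ring-base constants, and the assert stands in for the patch's GEM_BUG_ON() checks:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_MMIO_BASES 3

    struct mmio_base {
        uint32_t gen  : 8;   /* first gen this base applies to */
        uint32_t base : 24;  /* mmio offset, fits in 24 bits */
    };

    /* Must be sorted in reverse gen order; unused tail entries stay zero. */
    static const struct mmio_base vcs_bases[MAX_MMIO_BASES] = {
        { .gen = 11, .base = 0x1c0000 },
        { .gen = 6,  .base = 0x012000 },
        { .gen = 4,  .base = 0x004000 },
    };

    static uint32_t pick_base(const struct mmio_base *bases, unsigned int gen)
    {
        int i;

        for (i = 0; i < MAX_MMIO_BASES; i++)
            if (gen >= bases[i].gen)
                break;

        assert(i < MAX_MMIO_BASES && bases[i].base);
        return bases[i].base;
    }

    int main(void)
    {
        printf("gen9 VCS base: 0x%x\n", pick_base(vcs_bases, 9));   /* 0x12000 */
        printf("gen11 VCS base: 0x%x\n", pick_base(vcs_bases, 11)); /* 0x1c0000 */
        return 0;
    }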
+
+static void __sprint_engine_name(char *name, const struct engine_info *info)
+{
+ WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
+ intel_engine_classes[info->class].name,
+ info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
+}
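__sprint_engine_name() keeps the snprintf-truncation WARN; a standalone sketch of that return-value check (buffer size and names are arbitrary):

    #include <stdio.h>

    int main(void)
    {
        char name[8]; /* stand-in for INTEL_ENGINE_CS_MAX_NAME */
        int n = snprintf(name, sizeof(name), "%s%u", "videodecode", 0u);

        /* snprintf returns the length it wanted; >= size means truncation. */
        if (n >= (int)sizeof(name))
            fprintf(stderr, "engine name truncated to \"%s\"\n", name);
        else
            printf("engine name: %s\n", name);
        return 0;
    }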
+
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- const struct engine_class_info *class_info;
struct intel_engine_cs *engine;
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
- class_info = &intel_engine_classes[info->class];
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -253,35 +289,14 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->i915 = dev_priv;
- WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
- class_info->name, info->instance) >=
- sizeof(engine->name));
+ __sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
- if (INTEL_GEN(dev_priv) >= 11) {
- switch (engine->id) {
- case VCS:
- engine->mmio_base = GEN11_BSD_RING_BASE;
- break;
- case VCS2:
- engine->mmio_base = GEN11_BSD2_RING_BASE;
- break;
- case VECS:
- engine->mmio_base = GEN11_VEBOX_RING_BASE;
- break;
- default:
- /* take the original value for all other engines */
- engine->mmio_base = info->mmio_base;
- break;
- }
- } else {
- engine->mmio_base = info->mmio_base;
- }
- engine->irq_shift = info->irq_shift;
+ engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
engine->uabi_id = info->uabi_id;
- engine->uabi_class = class_info->uabi_class;
+ engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
engine->class);
@@ -291,7 +306,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
- spin_lock_init(&engine->stats.lock);
+ seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -436,21 +451,13 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
-static void intel_engine_init_timeline(struct intel_engine_cs *engine)
+static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
- engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
+ i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
static bool csb_force_mmio(struct drm_i915_private *i915)
{
- /*
- * IOMMU adds unpredictable latency causing the CSB write (from the
- * GPU into the HWSP) to only be visible some time after the interrupt
- * (missed breadcrumb syndrome).
- */
- if (intel_vtd_active())
- return true;
-
/* Older GVT emulation depends upon intercepting CSB mmio */
if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
return true;
@@ -484,12 +491,11 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- intel_engine_init_execlist(engine);
+ i915_timeline_init(engine->i915, &engine->timeline, engine->name);
- intel_engine_init_timeline(engine);
+ intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
- i915_gem_batch_pool_init(engine, &engine->batch_pool);
-
+ intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
}
@@ -520,8 +526,6 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
goto err_unref;
engine->scratch = vma;
- DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- engine->name, i915_ggtt_offset(vma));
return 0;
err_unref:
@@ -615,9 +619,6 @@ static int init_status_page(struct intel_engine_cs *engine)
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
-
- DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- engine->name, i915_ggtt_offset(vma));
return 0;
err_unpin:
@@ -669,7 +670,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ring = engine->context_pin(engine, engine->i915->kernel_context);
+ ring = intel_context_pin(engine->i915->kernel_context, engine);
if (IS_ERR(ring))
return PTR_ERR(ring);
@@ -678,8 +679,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* we can interrupt the engine at any time.
*/
if (engine->i915->preempt_context) {
- ring = engine->context_pin(engine,
- engine->i915->preempt_context);
+ ring = intel_context_pin(engine->i915->preempt_context, engine);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto err_unpin_kernel;
@@ -703,9 +703,9 @@ err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
if (engine->i915->preempt_context)
- engine->context_unpin(engine, engine->i915->preempt_context);
+ intel_context_unpin(engine->i915->preempt_context, engine);
err_unpin_kernel:
- engine->context_unpin(engine, engine->i915->kernel_context);
+ intel_context_unpin(engine->i915->kernel_context, engine);
return ret;
}
@@ -733,8 +733,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
i915_gem_object_put(engine->default_state);
if (engine->i915->preempt_context)
- engine->context_unpin(engine, engine->i915->preempt_context);
- engine->context_unpin(engine, engine->i915->kernel_context);
+ intel_context_unpin(engine->i915->preempt_context, engine);
+ intel_context_unpin(engine->i915->kernel_context, engine);
+
+ i915_timeline_fini(&engine->timeline);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -782,10 +784,24 @@ static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
+ uint32_t mcr_slice_subslice_mask;
+ uint32_t mcr_slice_subslice_select;
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+ GEN11_MCR_SUBSLICE_MASK;
+ mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
+ GEN11_MCR_SUBSLICE(subslice);
+ } else {
+ mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+ GEN8_MCR_SUBSLICE_MASK;
+ mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
+ GEN8_MCR_SUBSLICE(subslice);
+ }
+
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -800,14 +816,14 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
* The HW expects the slice and sublice selectors to be reset to 0
* after reading out the registers.
*/
- WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
- mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
- mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+ mcr &= ~mcr_slice_subslice_mask;
+ mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
ret = I915_READ_FW(reg);
- mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ mcr &= ~mcr_slice_subslice_mask;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
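A standalone sketch of the mask/select read-modify-write that read_subslice_reg() now parameterises per gen; plain memory stands in for the MCR selector register and the field layout is invented for the demo:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLICE_MASK (0x3u << 26)          /* invented field layout */
    #define SLICE(s)   ((uint32_t)(s) << 26)

    int main(void)
    {
        uint32_t mcr = 0; /* pretend MCR selector register */

        assert(!(mcr & SLICE_MASK));          /* HW expects the field reset */
        mcr = (mcr & ~SLICE_MASK) | SLICE(2); /* steer reads at slice 2 */
        printf("selector while reading: 0x%08x\n", mcr);

        mcr &= ~SLICE_MASK;                   /* restore the reset state */
        printf("selector restored:     0x%08x\n", mcr);
        return 0;
    }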
@@ -871,644 +887,6 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
}
}
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
-{
- const u32 idx = dev_priv->workarounds.count;
-
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
-
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
-
- dev_priv->workarounds.count++;
-
- return 0;
-}
-
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
-
-#define WA_SET_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[engine->id];
-
- if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
- return -EINVAL;
-
- I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[engine->id]++;
-
- return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
-
- /* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
-
- /* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Use Force Non-Coherent whenever executing a 3D context. This is a
- * workaround for for a possible hang in the unlikely event a TLB
- * invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:bdw,chv */
- /* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
-
- /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
- * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
- * polygons in the same 8x4 pixel/sample area to be processed without
- * stalling waiting for the earlier ones to write to Hierarchical Z
- * buffer."
- *
- * This optimization is off by default for BDW and CHV; turn it on.
- */
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-
- /* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK,
- GEN6_WIZ_HASHING_16x4);
-
- return 0;
-}
-
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* WaDisableDopClockGating:bdw
- *
- * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
- * to disable EUTC clock gating.
- */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
-
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
-}
-
-static int chv_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
-}
-
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
- /* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
-
- if (HAS_LLC(dev_priv)) {
- /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
- *
- * Must match Display Engine. See
- * WaCompressedResourceDisplayNewHashMode.
- */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN9_PBE_COMPRESSED_HASH_SELECTION);
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
-
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
- }
-
- /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
- /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
- GEN9_ENABLE_GPGPU_PREEMPTION);
-
- /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
- /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
-
- /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
-
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
-
- /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
- * both tied to WaForceContextSaveRestoreNonCoherent
- * in some hsds for skl. We keep the tie for all gen9. The
- * documentation is a bit hazy and so we want to get common behaviour,
- * even though there is no clear evidence we would need both on kbl/bxt.
- * This area has been source of system hangs so we play it safe
- * and mimic the skl regardless of what bspec says.
- *
- * Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
-
- /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES));
-
- /*
- * Supporting preemption with fine-granularity requires changes in the
- * batch buffer programming. Since we can't break old userspace, we
- * need to set our default preemption level to safe value. Userspace is
- * still able to use more fine-grained preemption levels, since in
- * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
- * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
- * not real HW workarounds, but merely a way to start using preemption
- * while maintaining old contract with userspace.
- */
-
- /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
-
- /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
- GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
-
- /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
- WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
-
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u8 vals[3] = { 0, 0, 0 };
- unsigned int i;
-
- for (i = 0; i < 3; i++) {
- u8 ss;
-
- /*
- * Only consider slices where one, and only one, subslice has 7
- * EUs
- */
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
- continue;
-
- /*
- * subslice_7eu[i] != 0 (because of the check above) and
- * ss_max == 4 (maximum number of subslices possible per slice)
- *
- * -> 0 <= ss <= 3;
- */
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
- vals[i] = 3 - ss;
- }
-
- if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
-
- /* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN9_IZ_HASHING_MASK(2) |
- GEN9_IZ_HASHING_MASK(1) |
- GEN9_IZ_HASHING_MASK(0),
- GEN9_IZ_HASHING(2, vals[2]) |
- GEN9_IZ_HASHING(1, vals[1]) |
- GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
-}
-
-static int skl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
- GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
- /* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
- /* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(engine);
-}
-
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
-
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
-
- /* WaToEnableHwFixForPushConstHWBug:bxt */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
- return 0;
-}
-
-static int cnl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- (I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
-
- /* WaForceContextSaveRestoreNonCoherent:cnl */
- WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
-
- /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
- /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
- /* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
- /* WaPushConstantDereferenceHoldDisable:cnl */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
-
- /* FtrEnableFastAnisoL1BankingFix: cnl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
-
- /* WaDisable3DMidCmdPreemption:cnl */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
-
- /* WaDisableGPGPUMidCmdPreemption:cnl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
- GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
-
- /* WaDisableEarlyEOT:cnl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
-}
-
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- (I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
-
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
-
- /* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
- GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
- /* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- /* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
- /* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int glk_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
- ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
- if (ret)
- return ret;
-
- /* WaToEnableHwFixForPushConstHWBug:glk */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
-}
-
-static int cfl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaToEnableHwFixForPushConstHWBug:cfl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
- GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
- /* WaDisableSbeCacheDispatchPortSharing:cfl */
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- /* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
- return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int err;
-
- if (GEM_WARN_ON(engine->id != RCS))
- return -EINVAL;
-
- dev_priv->workarounds.count = 0;
- dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
-
- if (IS_BROADWELL(dev_priv))
- err = bdw_init_workarounds(engine);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_init_workarounds(engine);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_init_workarounds(engine);
- else if (IS_BROXTON(dev_priv))
- err = bxt_init_workarounds(engine);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_init_workarounds(engine);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_init_workarounds(engine);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_init_workarounds(engine);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_init_workarounds(engine);
- else
- err = 0;
- if (err)
- return err;
-
- DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
- engine->name, dev_priv->workarounds.count);
- return 0;
-}
-
-int intel_ring_workarounds_emit(struct i915_request *rq)
-{
- struct i915_workarounds *w = &rq->i915->workarounds;
- u32 *cs;
- int ret, i;
-
- if (w->count == 0)
- return 0;
-
- ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
- if (ret)
- return ret;
-
- cs = intel_ring_begin(rq, w->count * 2 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
- *cs++ = w->reg[i].value;
- }
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
- if (ret)
- return ret;
-
- return 0;
-}
-
static bool ring_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1611,7 +989,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
* the last request that remains in the timeline. When idle, it is
* the last executed context as tracked by retirement.
*/
- rq = __i915_gem_active_peek(&engine->timeline->last_request);
+ rq = __i915_gem_active_peek(&engine->timeline.last_request);
if (rq)
return rq->ctx == kernel_context;
else
@@ -1659,6 +1037,9 @@ void intel_engines_park(struct drm_i915_private *i915)
intel_engine_dump(engine, &p, NULL);
}
+ /* Must be reset upon idling, or we may miss the busy wakeup. */
+ GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
+
if (engine->park)
engine->park(engine);
@@ -1681,6 +1062,8 @@ void intel_engines_unpark(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
if (engine->unpark)
engine->unpark(engine);
+
+ intel_engine_init_hangcheck(engine);
}
}
@@ -1713,17 +1096,37 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
return which;
}
+static int print_sched_attr(struct drm_i915_private *i915,
+ const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
static void print_request(struct drm_printer *m,
struct i915_request *rq,
const char *prefix)
{
- drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+ const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
+ prefix,
rq->global_seqno,
i915_request_completed(rq) ? "!" : "",
rq->fence.context, rq->fence.seqno,
- rq->priotree.priority,
+ buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- rq->timeline->common->name);
+ name);
}
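print_sched_attr() appends into the caller's buffer with a moving offset; a standalone sketch of that snprintf cursor idiom (fields invented):

    #include <stdio.h>

    #define PRIO_INVALID (-1)

    static int append_prio(char *buf, int x, int len, int prio)
    {
        if (prio == PRIO_INVALID) /* nothing to add, cursor unchanged */
            return x;
        return x + snprintf(buf + x, len - x, " prio=%d", prio);
    }

    int main(void)
    {
        char buf[80] = "";
        int x = 0;

        x = append_prio(buf, x, sizeof(buf), 3);
        printf("1a2b:42%s @ 10ms: demo\n", buf);
        return 0;
    }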
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
@@ -1829,12 +1232,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
read, execlists->csb_head,
write,
intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)));
+ &engine->irq_posted)),
+ yesno(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)),
+ enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
if (read >= GEN8_CSB_ENTRIES)
read = 0;
if (write >= GEN8_CSB_ENTRIES)
@@ -1861,8 +1267,9 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, rq: ",
- idx, count);
+ "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
+ idx, count,
+ i915_ggtt_offset(rq->ring->vma));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1884,11 +1291,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
+ const int MAX_REQUESTS_TO_SHOW = 8;
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct i915_request *rq;
+ struct i915_request *rq, *last;
struct rb_node *rb;
+ int count;
if (header) {
va_list ap;
@@ -1901,12 +1310,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_terminally_wedged(&engine->i915->gpu_error))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
@@ -1915,14 +1323,14 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tRequests:\n");
- rq = list_first_entry(&engine->timeline->requests,
+ rq = list_first_entry(&engine->timeline.requests,
struct i915_request, link);
- if (&rq->link != &engine->timeline->requests)
+ if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tfirst ");
- rq = list_last_entry(&engine->timeline->requests,
+ rq = list_last_entry(&engine->timeline.requests,
struct i915_request, link);
- if (&rq->link != &engine->timeline->requests)
+ if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
@@ -1933,12 +1341,16 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->head, rq->postfix, rq->tail,
rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
- drm_printf(m, "\t\tring->start: 0x%08x\n",
+ drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
- drm_printf(m, "\t\tring->head: 0x%08x\n",
+ drm_printf(m, "\t\tring->head: 0x%08x\n",
rq->ring->head);
- drm_printf(m, "\t\tring->tail: 0x%08x\n",
+ drm_printf(m, "\t\tring->tail: 0x%08x\n",
rq->ring->tail);
+ drm_printf(m, "\t\tring->emit: 0x%08x\n",
+ rq->ring->emit);
+ drm_printf(m, "\t\tring->space: 0x%08x\n",
+ rq->ring->space);
}
rcu_read_unlock();
@@ -1950,18 +1362,49 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- spin_lock_irq(&engine->timeline->lock);
- list_for_each_entry(rq, &engine->timeline->requests, link)
- print_request(m, rq, "\t\tE ");
+ spin_lock_irq(&engine->timeline.lock);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+ print_request(m, rq, "\t\tE ");
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > MAX_REQUESTS_TO_SHOW) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - MAX_REQUESTS_TO_SHOW);
+ }
+ print_request(m, last, "\t\tE ");
+ }
+
+ last = NULL;
+ count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
- list_for_each_entry(rq, &p->requests, priotree.link)
- print_request(m, rq, "\t\tQ ");
+ list_for_each_entry(rq, &p->requests, sched.link) {
+ if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+ print_request(m, rq, "\t\tQ ");
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > MAX_REQUESTS_TO_SHOW) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - MAX_REQUESTS_TO_SHOW);
+ }
+ print_request(m, last, "\t\tQ ");
}
- spin_unlock_irq(&engine->timeline->lock);
+
+ spin_unlock_irq(&engine->timeline.lock);
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -2026,7 +1469,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
return -ENODEV;
tasklet_disable(&execlists->tasklet);
- spin_lock_irqsave(&engine->stats.lock, flags);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -2050,7 +1493,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- spin_unlock_irqrestore(&engine->stats.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
tasklet_enable(&execlists->tasklet);
return err;
@@ -2079,12 +1522,13 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
+ unsigned int seq;
ktime_t total;
- unsigned long flags;
- spin_lock_irqsave(&engine->stats.lock, flags);
- total = __intel_engine_get_busy_time(engine);
- spin_unlock_irqrestore(&engine->stats.lock, flags);
+ do {
+ seq = read_seqbegin(&engine->stats.lock);
+ total = __intel_engine_get_busy_time(engine);
+ } while (read_seqretry(&engine->stats.lock, seq));
return total;
}
@@ -2102,15 +1546,16 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return;
- spin_lock_irqsave(&engine->stats.lock, flags);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
WARN_ON_ONCE(engine->stats.enabled == 0);
if (--engine->stats.enabled == 0) {
engine->stats.total = __intel_engine_get_busy_time(engine);
engine->stats.active = 0;
}
- spin_unlock_irqrestore(&engine->stats.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
+#include "selftests/intel_engine_cs.c"
#endif
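
The stats.lock conversion above swaps a plain spinlock for a seqlock: the enable/disable paths still take an exclusive write lock, while busy-time readers now spin through a lockless retry loop and can never block the submission path. A minimal sketch of that read/write pattern, assuming a seqlock_t-protected accumulator (the struct and helper names are illustrative, not the driver's):

    #include <linux/seqlock.h>
    #include <linux/ktime.h>

    struct busy_stats {
            seqlock_t lock;         /* seqlock_init() at setup time */
            ktime_t total;
    };

    /* Writer side: mutate under the exclusive write lock. */
    static void stats_add(struct busy_stats *s, ktime_t delta)
    {
            unsigned long flags;

            write_seqlock_irqsave(&s->lock, flags);
            s->total = ktime_add(s->total, delta);
            write_sequnlock_irqrestore(&s->lock, flags);
    }

    /* Reader side: sample, then retry if a writer raced with us. */
    static ktime_t stats_read(struct busy_stats *s)
    {
            unsigned int seq;
            ktime_t total;

            do {
                    seq = read_seqbegin(&s->lock);
                    total = s->total;
            } while (read_seqretry(&s->lock, seq));

            return total;
    }

The reader simply re-samples whenever read_seqretry() reports a concurrent writer, trading an occasional retry for zero reader-side contention.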
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 707d49c12638..b431b6733cc1 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1272,6 +1272,34 @@ out:
mutex_unlock(&fbc->lock);
}
+/*
+ * intel_fbc_reset_underrun - reset FBC fifo underrun status.
+ * @dev_priv: i915 device instance
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ cancel_work_sync(&dev_priv->fbc.underrun_work);
+
+ ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+ if (ret)
+ return ret;
+
+ if (dev_priv->fbc.underrun_detected) {
+ DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+ dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+ }
+
+ dev_priv->fbc.underrun_detected = false;
+ mutex_unlock(&dev_priv->fbc.lock);
+
+ return 0;
+}
+
/**
* intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
* @dev_priv: i915 device instance
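
intel_fbc_reset_underrun() above is deliberately safe to call from process context: it flushes the pending underrun worker first, then takes fbc.lock interruptibly so a signal can abort the wait. A hedged sketch of how a caller might drive it; the debugfs-style hook below is hypothetical, only intel_fbc_reset_underrun() comes from the patch:

    /* Hypothetical debugfs setter; only intel_fbc_reset_underrun() is real. */
    static int i915_fbc_reset_underrun_set(void *data, u64 val)
    {
            struct drm_i915_private *dev_priv = data;

            /* May return -EINTR if the caller is signalled while waiting. */
            return intel_fbc_reset_underrun(dev_priv);
    }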
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6467a5cc2ca3..e9e02b58b7be 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -221,6 +221,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
+ fb = &ifbdev->fb->base;
+ intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fb_info\n");
@@ -230,8 +233,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->par = helper;
- fb = &ifbdev->fb->base;
-
ifbdev->helper.fb = fb;
strcpy(info->fix.id, "inteldrmfb");
@@ -640,7 +641,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc->state->active)
continue;
- WARN(!crtc->primary->fb,
+ WARN(!crtc->primary->state->fb,
"re-used BIOS config but lost an fb on crtc %d\n",
crtc->base.id);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 3a8d3d06c26a..7fff0a0eceb4 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -80,7 +80,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
}
might_sleep();
- intel_psr_invalidate(dev_priv, frontbuffer_bits);
+ intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
new file mode 100644
index 000000000000..105e2a9e874a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -0,0 +1,274 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2003-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_GPU_COMMANDS_H_
+#define _INTEL_GPU_COMMANDS_H_
+
+/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_MI_CLIENT 0x0
+#define INSTR_BC_CLIENT 0x2
+#define INSTR_RC_CLIENT 0x3
+#define INSTR_SUBCLIENT_SHIFT 27
+#define INSTR_SUBCLIENT_MASK 0x18000000
+#define INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK 0x7000000
+#define INSTR_26_TO_24_SHIFT 24
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define MI_GLOBAL_GTT (1<<22)
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_SET_APPID MI_INSTR(0x0e, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_MASK (3<<16)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
+#define MI_RESTORE_INHIBIT (1<<0)
+#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
+#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_POLL (1<<15)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
+#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
+#define MI_USE_GGTT (1 << 22) /* g4x+ */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
+/*
+ * Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many registers: simply issue x
+ * address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_NOTIFY (1<<8)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_BATCH_RESOURCE_STREAMER (1<<10)
+
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+
+#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define BLT_WRITE_A (2<<20)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_SRC_COPY (0xcc<<16)
+#define BLT_ROP_COLOR_COPY (0xf0<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL_FLUSH_L3 (1<<27)
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_MMIO_WRITE (1<<23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
+#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
+#define MI_PREDICATE MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
+#define MI_CLFLUSH MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
+#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+ ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+
+#endif /* _INTEL_GPU_COMMANDS_H_ */
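
The MI_LOAD_REGISTER_IMM comment in this header boils down to a small emission recipe: one MI_NOOP, one LRI header sized for x register/value pairs, then the pairs themselves (with x <= 2^4). A self-contained sketch reusing only definitions from this header; the emit helper itself is illustrative:

    #include <stdint.h>

    #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
    #define MI_NOOP                 MI_INSTR(0, 0)
    #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)

    /* Emit an LRI writing two registers; 'cs' must have room for 6 dwords. */
    static uint32_t *emit_lri_pair(uint32_t *cs,
                                   uint32_t reg0, uint32_t val0,
                                   uint32_t reg1, uint32_t val1)
    {
            *cs++ = MI_NOOP;                        /* hw quirk: NOOP before LRI */
            *cs++ = MI_LOAD_REGISTER_IMM(2);        /* 2 address/value pairs */
            *cs++ = reg0;
            *cs++ = val0;
            *cs++ = reg1;
            *cs++ = val1;
            return cs;
    }

Note how the flags field of MI_INSTR() carries 2*x - 1, i.e. the total command length minus two dwords, per the usual MI length encoding.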
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index ff08ea0ebf49..116f4ccf1bbd 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -64,10 +64,12 @@ void intel_guc_init_early(struct intel_guc *guc)
{
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
- intel_guc_log_init_early(guc);
+ intel_guc_log_init_early(&guc->log);
mutex_init(&guc->send_mutex);
+ spin_lock_init(&guc->irq_lock);
guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
guc->notify = gen8_guc_raise_irq;
}
@@ -86,9 +88,10 @@ int intel_guc_init_wq(struct intel_guc *guc)
* or scheduled later on resume. This way the handling of work
* item can be kept same between system suspend & rpm suspend.
*/
- guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.runtime.flush_wq) {
+ guc->log.relay.flush_wq =
+ alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.relay.flush_wq) {
DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
return -ENOMEM;
}
@@ -111,7 +114,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
WQ_HIGHPRI);
if (!guc->preempt_wq) {
- destroy_workqueue(guc->log.runtime.flush_wq);
+ destroy_workqueue(guc->log.relay.flush_wq);
DRM_ERROR("Couldn't allocate workqueue for GuC "
"preemption\n");
return -ENOMEM;
@@ -129,7 +132,7 @@ void intel_guc_fini_wq(struct intel_guc *guc)
USES_GUC_SUBMISSION(dev_priv))
destroy_workqueue(guc->preempt_wq);
- destroy_workqueue(guc->log.runtime.flush_wq);
+ destroy_workqueue(guc->log.relay.flush_wq);
}
static int guc_shared_data_create(struct intel_guc *guc)
@@ -169,7 +172,7 @@ int intel_guc_init(struct intel_guc *guc)
return ret;
GEM_BUG_ON(!guc->shared_data);
- ret = intel_guc_log_create(guc);
+ ret = intel_guc_log_create(&guc->log);
if (ret)
goto err_shared;
@@ -184,7 +187,7 @@ int intel_guc_init(struct intel_guc *guc)
return 0;
err_log:
- intel_guc_log_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
return ret;
@@ -196,41 +199,27 @@ void intel_guc_fini(struct intel_guc *guc)
i915_ggtt_disable_guc(dev_priv);
intel_guc_ads_destroy(guc);
- intel_guc_log_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
}
-static u32 get_gt_type(struct drm_i915_private *dev_priv)
+static u32 get_log_control_flags(void)
{
- /* XXX: GT type based on PCI device ID? field seems unused by fw */
- return 0;
-}
-
-static u32 get_core_family(struct drm_i915_private *dev_priv)
-{
- u32 gen = INTEL_GEN(dev_priv);
+ u32 level = i915_modparams.guc_log_level;
+ u32 flags = 0;
- switch (gen) {
- case 9:
- return GUC_CORE_FAMILY_GEN9;
-
- default:
- MISSING_CASE(gen);
- return GUC_CORE_FAMILY_UNKNOWN;
- }
-}
+ GEM_BUG_ON(level < 0);
-static u32 get_log_verbosity_flags(void)
-{
- if (i915_modparams.guc_log_level > 0) {
- u32 verbosity = i915_modparams.guc_log_level - 1;
+ if (!GUC_LOG_LEVEL_IS_ENABLED(level))
+ flags |= GUC_LOG_DEFAULT_DISABLED;
- GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
- return verbosity << GUC_LOG_VERBOSITY_SHIFT;
- }
+ if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
+ flags |= GUC_LOG_DISABLED;
+ else
+ flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
+ GUC_LOG_VERBOSITY_SHIFT;
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return GUC_LOG_DISABLED;
+ return flags;
}
/*
@@ -246,10 +235,6 @@ void intel_guc_init_params(struct intel_guc *guc)
memset(params, 0, sizeof(params));
- params[GUC_CTL_DEVICE_INFO] |=
- (get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
- (get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
-
/*
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT value is calculated by:
@@ -265,12 +250,13 @@ void intel_guc_init_params(struct intel_guc *guc)
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
- params[GUC_CTL_DEBUG] = get_log_verbosity_flags();
+ params[GUC_CTL_DEBUG] = get_log_control_flags();
/* If GuC submission is enabled, set up additional parameters here */
if (USES_GUC_SUBMISSION(dev_priv)) {
- u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ads = intel_guc_ggtt_offset(guc,
+ guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
@@ -301,16 +287,23 @@ void intel_guc_init_params(struct intel_guc *guc)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
WARN(1, "Unexpected send: action=%#x\n", *action);
return -ENODEV;
}
+void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
+{
+ WARN(1, "Unexpected event: no suitable handler\n");
+}
+
/*
* This function implements the MMIO based host to GuC interface.
*/
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 status;
@@ -320,6 +313,9 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
GEM_BUG_ON(!len);
GEM_BUG_ON(len > guc->send_regs.count);
+	/* We expect only the action code */
+ GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
+
/* If CT is available, we expect to use MMIO only during init/fini */
GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
@@ -341,29 +337,74 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
*/
ret = __intel_wait_for_register_fw(dev_priv,
guc_send_reg(guc, 0),
- INTEL_GUC_RECV_MASK,
- INTEL_GUC_RECV_MASK,
+ INTEL_GUC_MSG_TYPE_MASK,
+ INTEL_GUC_MSG_TYPE_RESPONSE <<
+ INTEL_GUC_MSG_TYPE_SHIFT,
10, 10, &status);
- if (status != INTEL_GUC_STATUS_SUCCESS) {
- /*
- * Either the GuC explicitly returned an error (which
- * we convert to -EIO here) or no response at all was
- * received within the timeout limit (-ETIMEDOUT)
- */
- if (ret != -ETIMEDOUT)
- ret = -EIO;
-
- DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+ /* If GuC explicitly returned an error, convert it to -EIO */
+ if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
+ ret = -EIO;
+
+ if (ret) {
+ DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
+ " ret=%d status=0x%08X response=0x%08X\n",
+ action[0], ret, status,
+ I915_READ(SOFT_SCRATCH(15)));
+ goto out;
}
+ if (response_buf) {
+ int count = min(response_buf_size, guc->send_regs.count - 1);
+
+ for (i = 0; i < count; i++)
+ response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+ }
+
+ /* Use data from the GuC response as our return value */
+ ret = INTEL_GUC_MSG_TO_DATA(status);
+
+out:
intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
}
+void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 msg, val;
+
+ /*
+	 * Sample the log-buffer flush related bits and clear them out
+	 * immediately from the message identity register to minimize the
+	 * probability of losing a flush interrupt when flush interrupts
+	 * arrive back to back.
+	 * A new flush interrupt, for a different log buffer type (e.g.
+	 * ISR), can arrive while the host is still handling a previous
+	 * one (e.g. DPC). Since the same bit is used in the message
+	 * register for ISR and DPC, the GuC could set the bit for the 2nd
+	 * interrupt just as the host clears it while handling the 1st.
+ */
+ spin_lock(&guc->irq_lock);
+ val = I915_READ(SOFT_SCRATCH(15));
+ msg = val & guc->msg_enabled_mask;
+ I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
+ spin_unlock(&guc->irq_lock);
+
+ intel_guc_to_host_process_recv_msg(guc, msg);
+}
+
+void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+{
+ /* Make sure to handle only enabled messages */
+ msg &= guc->msg_enabled_mask;
+
+ if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
+ intel_guc_log_handle_flush_event(&guc->log);
+}
+
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -410,7 +451,7 @@ int intel_guc_suspend(struct intel_guc *guc)
u32 data[] = {
INTEL_GUC_ACTION_ENTER_S_STATE,
GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
- guc_ggtt_offset(guc->shared_data)
+ intel_guc_ggtt_offset(guc, guc->shared_data)
};
return intel_guc_send(guc, data, ARRAY_SIZE(data));
@@ -434,7 +475,7 @@ int intel_guc_reset_engine(struct intel_guc *guc,
data[3] = 0;
data[4] = 0;
data[5] = guc->execbuf_client->stage_id;
- data[6] = guc_ggtt_offset(guc->shared_data);
+ data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
@@ -448,13 +489,66 @@ int intel_guc_resume(struct intel_guc *guc)
u32 data[] = {
INTEL_GUC_ACTION_EXIT_S_STATE,
GUC_POWER_D0,
- guc_ggtt_offset(guc->shared_data)
+ intel_guc_ggtt_offset(guc, guc->shared_data)
};
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
+ * DOC: GuC Address Space
+ *
+ * The layout of GuC address space is shown below:
+ *
+ * ::
+ *
+ * +==============> +====================+ <== GUC_GGTT_TOP
+ * ^ | |
+ * | | |
+ * | | DRAM |
+ * | | Memory |
+ * | | |
+ * GuC | |
+ * Address +========> +====================+ <== WOPCM Top
+ * Space ^ | HW contexts RSVD |
+ * | | | WOPCM |
+ * | | +==> +--------------------+ <== GuC WOPCM Top
+ * | GuC ^ | |
+ * | GGTT | | |
+ * | Pin GuC | GuC |
+ * | Bias WOPCM | WOPCM |
+ * | | Size | |
+ * | | | | |
+ * v v v | |
+ * +=====+=====+==> +====================+ <== GuC WOPCM Base
+ * | Non-GuC WOPCM |
+ * | (HuC/Reserved) |
+ * +====================+ <== WOPCM Base
+ *
+ * The lower part of the GuC Address Space [0, ggtt_pin_bias) is mapped to
+ * WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
+ * DRAM. The value of the GuC ggtt_pin_bias is determined by the overall
+ * WOPCM size and the actual GuC WOPCM size.
+ */
+
+/**
+ * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * @guc: intel_guc structure.
+ *
+ * This function will calculate and initialize the ggtt_pin_bias value based on
+ * overall WOPCM size and GuC WOPCM size.
+ */
+void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base);
+
+ guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
+}
+
+/**
* intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
@@ -462,7 +556,7 @@ int intel_guc_resume(struct intel_guc *guc)
* This is a wrapper to create an object for use with the GuC. In order to
* use it inside the GuC, an object needs to be pinned lifetime, so we allocate
* both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
+ * it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
* range is reserved inside GuC.
*
* Return: A i915_vma if successful, otherwise an ERR_PTR.
@@ -483,7 +577,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
ret = i915_vma_pin(vma, 0, PAGE_SIZE,
- PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
if (ret) {
vma = ERR_PTR(ret);
goto err;
@@ -495,14 +589,3 @@ err:
i915_gem_object_put(obj);
return vma;
}
-
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
-{
- u32 wopcm_size = GUC_WOPCM_TOP;
-
- /* On BXT, the top of WOPCM is reserved for RC6 context */
- if (IS_GEN9_LP(dev_priv))
- wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
-
- return wopcm_size;
-}
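
The address-space diagram above reduces to two invariants: ggtt_pin_bias is the WOPCM space above the GuC WOPCM base (as intel_guc_init_ggtt_pin_bias() computes), and every GuC-visible GGTT offset must land inside [ggtt_pin_bias, GUC_GGTT_TOP). A standalone sketch of that arithmetic, with made-up example sizes:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GUC_GGTT_TOP 0xFEE00000u

    int main(void)
    {
            /* Example values; real ones come from the WOPCM partitioning. */
            uint32_t wopcm_size = 2u << 20;         /* 2 MiB total WOPCM */
            uint32_t guc_wopcm_base = 512u << 10;   /* non-GuC (HuC) portion */
            uint32_t ggtt_pin_bias = wopcm_size - guc_wopcm_base;

            /* A GuC object pinned with PIN_OFFSET_BIAS must satisfy: */
            uint32_t offset = 0x00400000;           /* example GGTT offset */
            uint64_t size = 4096;

            assert(offset >= ggtt_pin_bias);
            assert((uint64_t)offset + size <= GUC_GGTT_TOP);

            printf("ggtt_pin_bias = 0x%08x\n", ggtt_pin_bias);
            return 0;
    }

These are exactly the two GEM_BUG_ON() checks that intel_guc_ggtt_offset() performs in intel_guc.h below.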
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index b9424ac644ac..f1265e122d30 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -49,11 +49,16 @@ struct intel_guc {
struct intel_guc_log log;
struct intel_guc_ct ct;
+ /* Offset where Non-WOPCM memory starts. */
+ u32 ggtt_pin_bias;
+
/* Log snapshot if GuC errors during load */
struct drm_i915_gem_object *load_err_log;
/* intel_guc_recv interrupt related state */
+ spinlock_t irq_lock;
bool interrupts_enabled;
+ unsigned int msg_enabled_mask;
struct i915_vma *ads_vma;
struct i915_vma *stage_desc_pool;
@@ -83,7 +88,11 @@ struct intel_guc {
struct mutex send_mutex;
/* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+
+ /* GuC's FW specific event handler function */
+ void (*handler)(struct intel_guc *guc);
/* GuC's FW specific notify function */
void (*notify)(struct intel_guc *guc);
@@ -92,7 +101,14 @@ struct intel_guc {
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
- return guc->send(guc, action, len);
+ return guc->send(guc, action, len, NULL, 0);
+}
+
+static inline int
+intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
+{
+ return guc->send(guc, action, len, response_buf, response_buf_size);
}
static inline void intel_guc_notify(struct intel_guc *guc)
@@ -100,17 +116,33 @@ static inline void intel_guc_notify(struct intel_guc *guc)
guc->notify(guc);
}
-/*
- * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
- * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
- * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
- * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
+{
+ guc->handler(guc);
+}
+
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
+/**
+ * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
+ * @guc: intel_guc structure.
+ * @vma: i915 graphics virtual memory area.
+ *
+ * GuC does not allow any gfx GGTT address that falls into the range
+ * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
+ * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from
+ * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
+ * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias.
+ *
+ * Return: GGTT offset of the @vma.
*/
-static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
+ struct i915_vma *vma)
{
u32 offset = i915_ggtt_offset(vma);
- GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+ GEM_BUG_ON(offset < guc->ggtt_pin_bias);
GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
return offset;
@@ -119,17 +151,43 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
+void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
int intel_guc_init_wq(struct intel_guc *guc);
void intel_guc_fini_wq(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+void intel_guc_to_host_event_handler(struct intel_guc *guc);
+void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
+void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
+void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
+
+static inline int intel_guc_sanitize(struct intel_guc *guc)
+{
+ intel_uc_fw_sanitize(&guc->fw);
+ return 0;
+}
+
+static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
+{
+ spin_lock_irq(&guc->irq_lock);
+ guc->msg_enabled_mask |= mask;
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
+{
+ spin_lock_irq(&guc->irq_lock);
+ guc->msg_enabled_mask &= ~mask;
+ spin_unlock_irq(&guc->irq_lock);
+}
#endif
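
With the widened send() vfunc above, a caller that expects data back passes a response buffer and receives either a negative errno or a non-negative value (the response length, or data decoded from the status dword). A hedged usage sketch; the action code and response layout below are placeholders rather than a real GuC ABI, only intel_guc_send_and_receive() comes from the patch:

    /* Hypothetical query; only intel_guc_send_and_receive() is real. */
    static int guc_query_example(struct intel_guc *guc)
    {
            u32 action[] = { 0x5500 /* placeholder action code */ };
            u32 response[2];
            int ret;

            ret = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
                                             response, ARRAY_SIZE(response));
            if (ret < 0)
                    return ret;     /* send failed or GuC returned an error */

            /* ret >= 0: response length or status data; payload in response[] */
            return 0;
    }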
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index ac627534667d..dcaa3fb71765 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -75,7 +75,7 @@ static void guc_policies_init(struct guc_policies *policies)
int intel_guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct i915_vma *vma;
+ struct i915_vma *vma, *kernel_ctx_vma;
struct page *page;
/* The ads obj includes the struct itself and buffers passed to GuC */
struct {
@@ -121,9 +121,10 @@ int intel_guc_ads_create(struct intel_guc *guc)
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
+ kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
+ dev_priv->engine[RCS])->state;
blob->ads.golden_context_lrca =
- guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) +
- skipped_offset;
+ intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
/*
* The GuC expects us to exclude the portion of the context image that
@@ -135,7 +136,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
blob->ads.eng_state_size[engine->guc_id] =
engine->context_size - skipped_size;
- base = guc_ggtt_offset(vma);
+ base = intel_guc_ggtt_offset(guc, vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 24ad55752396..371b6005954a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -24,14 +24,49 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__)
+#else
+#define CT_DEBUG_DRIVER(...) do { } while (0)
+#endif
+
+struct ct_request {
+ struct list_head link;
+ u32 fence;
+ u32 status;
+ u32 response_len;
+ u32 *response_buf;
+};
+
+struct ct_incoming_request {
+ struct list_head link;
+ u32 msg[];
+};
+
enum { CTB_SEND = 0, CTB_RECV = 1 };
enum { CTB_OWNER_HOST = 0 };
+static void ct_incoming_request_worker_func(struct work_struct *w);
+
+/**
+ * intel_guc_ct_init_early - Initialize CT state without requiring device access
+ * @ct: pointer to CT struct
+ */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
/* we're using static channel owners */
ct->host_channel.owner = CTB_OWNER_HOST;
+
+ spin_lock_init(&ct->lock);
+ INIT_LIST_HEAD(&ct->pending_requests);
+ INIT_LIST_HEAD(&ct->incoming_requests);
+ INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+}
+
+static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
+{
+ return container_of(ct, struct intel_guc, ct);
}
static inline const char *guc_ct_buffer_type_to_str(u32 type)
@@ -49,8 +84,8 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
u32 cmds_addr, u32 size, u32 owner)
{
- DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
- desc, cmds_addr, size, owner);
+ CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
+ desc, cmds_addr, size, owner);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
@@ -59,8 +94,8 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
- DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
- desc, desc->head, desc->tail);
+ CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
+ desc, desc->head, desc->tail);
desc->head = 0;
desc->tail = 0;
desc->is_in_error = 0;
@@ -79,7 +114,7 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
int err;
/* Can't use generic send(), CT registration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (err)
DRM_ERROR("CT: register %s buffer failed; err=%d\n",
guc_ct_buffer_type_to_str(type), err);
@@ -98,7 +133,7 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
int err;
/* Can't use generic send(), CT deregistration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (err)
DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
guc_ct_buffer_type_to_str(type), owner, err);
@@ -156,7 +191,8 @@ static int ctch_init(struct intel_guc *guc,
err = PTR_ERR(blob);
goto err_vma;
}
- DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));
+ CT_DEBUG_DRIVER("CT: vma base=%#x\n",
+ intel_guc_ggtt_offset(guc, ctch->vma));
/* store pointers to desc and cmds */
for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
@@ -170,8 +206,8 @@ static int ctch_init(struct intel_guc *guc,
err_vma:
i915_vma_unpin_and_release(&ctch->vma);
err_out:
- DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
- ctch->owner, err);
+ CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
+ ctch->owner, err);
return err;
}
@@ -191,8 +227,8 @@ static int ctch_open(struct intel_guc *guc,
int err;
int i;
- DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
- ctch->owner, yesno(ctch_is_open(ctch)));
+ CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
+ ctch->owner, yesno(ctch_is_open(ctch)));
if (!ctch->vma) {
err = ctch_init(guc, ctch);
@@ -202,7 +238,7 @@ static int ctch_open(struct intel_guc *guc,
}
	/* vma should be already allocated and mapped */
- base = guc_ggtt_offset(ctch->vma);
+ base = intel_guc_ggtt_offset(guc, ctch->vma);
/* (re)initialize descriptors
* cmds buffers are in the second half of the blob page
@@ -263,10 +299,29 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
return ++ctch->next_fence;
}
+/**
+ * DOC: CTB Host to GuC request
+ *
+ * Format of the CTB Host to GuC request message is as follows::
+ *
+ * +------------+---------+---------+---------+---------+
+ * | msg[0] | [1] | [2] | ... | [n-1] |
+ * +------------+---------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+---------+
+ * | | 0 | 1 | ... | n |
+ * +============+=========+=========+=========+=========+
+ * | len >= 1 | FENCE | request specific data |
+ * +------+-----+---------+---------+---------+---------+
+ *
+ * ^-----------------len-------------------^
+ */
+
static int ctb_write(struct intel_guc_ct_buffer *ctb,
const u32 *action,
u32 len /* in dwords */,
- u32 fence)
+ u32 fence,
+ bool want_response)
{
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head = desc->head / 4; /* in dwords */
@@ -295,15 +350,21 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
if (unlikely(used + len + 1 >= size))
return -ENOSPC;
- /* Write the message. The format is the following:
+ /*
+ * Write the message. The format is the following:
* DW0: header (including action code)
* DW1: fence
* DW2+: action data
*/
header = (len << GUC_CT_MSG_LEN_SHIFT) |
(GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
+ (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);
+ CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
+ 4, &header, 4, &fence,
+ 4 * (len - 1), &action[1]);
+
cmds[tail] = header;
tail = (tail + 1) % size;
@@ -322,16 +383,25 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
return 0;
}
-/* Wait for the response from the GuC.
+/**
+ * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
+ * @desc: buffer descriptor
* @fence: response fence
* @status: placeholder for status
- * return: 0 response received (status is valid)
- * -ETIMEDOUT no response within hardcoded timeout
- * -EPROTO no response, ct buffer was in error
+ *
+ * GuC will update the CT buffer descriptor with the new fence and status
+ * after processing the command identified by the fence. Wait for the
+ * specified fence and then read the status of the command from the
+ * descriptor.
+ *
+ * Return:
+ * * 0 response received (status is valid)
+ * * -ETIMEDOUT no response within hardcoded timeout
+ * * -EPROTO no response, CT buffer is in error
*/
-static int wait_for_response(struct guc_ct_buffer_desc *desc,
- u32 fence,
- u32 *status)
+static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
+ u32 fence,
+ u32 *status)
{
int err;
@@ -363,71 +433,440 @@ static int wait_for_response(struct guc_ct_buffer_desc *desc,
return err;
}
-static int ctch_send(struct intel_guc *guc,
+/**
+ * wait_for_ct_request_update - Wait for CT request state update.
+ * @req: pointer to pending request
+ * @status: placeholder for status
+ *
+ * For each sent request, GuC shall send back a CT response message.
+ * Our message handler will update the status of the tracked request
+ * once a response message with the given fence is received. Wait here
+ * and check for a valid response status value.
+ *
+ * Return:
+ * * 0 response received (status is valid)
+ * * -ETIMEDOUT no response within hardcoded timeout
+ */
+static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+{
+ int err;
+
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No GuC command should ever take longer than 10ms.
+ */
+#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
+ err = wait_for_us(done, 10);
+ if (err)
+ err = wait_for(done, 10);
+#undef done
+
+ if (unlikely(err))
+ DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
+
+ *status = req->status;
+ return err;
+}
+
+static int ctch_send(struct intel_guc_ct *ct,
struct intel_guc_ct_channel *ctch,
const u32 *action,
u32 len,
+ u32 *response_buf,
+ u32 response_buf_size,
u32 *status)
{
struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
+ struct ct_request request;
+ unsigned long flags;
u32 fence;
int err;
GEM_BUG_ON(!ctch_is_open(ctch));
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
+ GEM_BUG_ON(!response_buf && response_buf_size);
fence = ctch_get_next_fence(ctch);
- err = ctb_write(ctb, action, len, fence);
+ request.fence = fence;
+ request.status = 0;
+ request.response_len = response_buf_size;
+ request.response_buf = response_buf;
+
+ spin_lock_irqsave(&ct->lock, flags);
+ list_add_tail(&request.link, &ct->pending_requests);
+ spin_unlock_irqrestore(&ct->lock, flags);
+
+ err = ctb_write(ctb, action, len, fence, !!response_buf);
if (unlikely(err))
- return err;
+ goto unlink;
- intel_guc_notify(guc);
+ intel_guc_notify(ct_to_guc(ct));
- err = wait_for_response(desc, fence, status);
+ if (response_buf)
+ err = wait_for_ct_request_update(&request, status);
+ else
+ err = wait_for_ctb_desc_update(desc, fence, status);
if (unlikely(err))
- return err;
- if (*status != INTEL_GUC_STATUS_SUCCESS)
- return -EIO;
- return 0;
+ goto unlink;
+
+ if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
+ err = -EIO;
+ goto unlink;
+ }
+
+ if (response_buf) {
+ /* There shall be no data in the status */
+ WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
+ /* Return actual response len */
+ err = request.response_len;
+ } else {
+ /* There shall be no response payload */
+ WARN_ON(request.response_len);
+ /* Return data decoded from the status dword */
+ err = INTEL_GUC_MSG_TO_DATA(*status);
+ }
+
+unlink:
+ spin_lock_irqsave(&ct->lock, flags);
+ list_del(&request.link);
+ spin_unlock_irqrestore(&ct->lock, flags);
+
+ return err;
}
/*
* Command Transport (CT) buffer based GuC send function.
*/
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
+static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
- struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+ struct intel_guc_ct *ct = &guc->ct;
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
u32 status = ~0; /* undefined */
- int err;
+ int ret;
mutex_lock(&guc->send_mutex);
- err = ctch_send(guc, ctch, action, len, &status);
- if (unlikely(err)) {
+ ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
+ &status);
+ if (unlikely(ret < 0)) {
DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
- action[0], err, status);
+ action[0], ret, status);
+ } else if (unlikely(ret)) {
+ CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
+ action[0], ret, ret);
}
mutex_unlock(&guc->send_mutex);
- return err;
+ return ret;
+}
+
+static inline unsigned int ct_header_get_len(u32 header)
+{
+ return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
+}
+
+static inline unsigned int ct_header_get_action(u32 header)
+{
+ return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
+}
+
+static inline bool ct_header_is_response(u32 header)
+{
+ return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
+}
+
+static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+{
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 head = desc->head / 4; /* in dwords */
+ u32 tail = desc->tail / 4; /* in dwords */
+ u32 size = desc->size / 4; /* in dwords */
+ u32 *cmds = ctb->cmds;
+ s32 available; /* in dwords */
+ unsigned int len;
+ unsigned int i;
+
+ GEM_BUG_ON(desc->size % 4);
+ GEM_BUG_ON(desc->head % 4);
+ GEM_BUG_ON(desc->tail % 4);
+ GEM_BUG_ON(tail >= size);
+ GEM_BUG_ON(head >= size);
+
+ /* tail == head condition indicates empty */
+ available = tail - head;
+ if (unlikely(available == 0))
+ return -ENODATA;
+
+ /* beware of buffer wrap case */
+ if (unlikely(available < 0))
+ available += size;
+ CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
+ GEM_BUG_ON(available < 0);
+
+ data[0] = cmds[head];
+ head = (head + 1) % size;
+
+ /* message len with header */
+ len = ct_header_get_len(data[0]) + 1;
+ if (unlikely(len > (u32)available)) {
+ DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
+ 4, data,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ return -EPROTO;
+ }
+
+ for (i = 1; i < len; i++) {
+ data[i] = cmds[head];
+ head = (head + 1) % size;
+ }
+ CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
+
+ desc->head = head * 4;
+ return 0;
}
/**
- * Enable buffer based command transport
+ * DOC: CTB GuC to Host response
+ *
+ * Format of the CTB GuC to Host response message is as follows::
+ *
+ * +------------+---------+---------+---------+---------+---------+
+ * | msg[0] | [1] | [2] | [3] | ... | [n-1] |
+ * +------------+---------+---------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+---------+---------+
+ * | | 0 | 1 | 2 | ... | n |
+ * +============+=========+=========+=========+=========+=========+
+ * | len >= 2 | FENCE | STATUS | response specific data |
+ * +------+-----+---------+---------+---------+---------+---------+
+ *
+ * ^-----------------------len-----------------------^
+ */
+
+static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
+{
+ u32 header = msg[0];
+ u32 len = ct_header_get_len(header);
+ u32 msglen = len + 1; /* total message length including header */
+ u32 fence;
+ u32 status;
+ u32 datalen;
+ struct ct_request *req;
+ bool found = false;
+
+ GEM_BUG_ON(!ct_header_is_response(header));
+ GEM_BUG_ON(!in_irq());
+
+ /* Response payload shall at least include fence and status */
+ if (unlikely(len < 2)) {
+ DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ return -EPROTO;
+ }
+
+ fence = msg[1];
+ status = msg[2];
+ datalen = len - 2;
+
+ /* Format of the status follows RESPONSE message */
+ if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
+ DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ return -EPROTO;
+ }
+
+ CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+
+ spin_lock(&ct->lock);
+ list_for_each_entry(req, &ct->pending_requests, link) {
+ if (unlikely(fence != req->fence)) {
+ CT_DEBUG_DRIVER("CT: request %u awaits response\n",
+ req->fence);
+ continue;
+ }
+ if (unlikely(datalen > req->response_len)) {
+ DRM_ERROR("CT: response %u too long %*ph\n",
+ req->fence, 4 * msglen, msg);
+ datalen = 0;
+ }
+ if (datalen)
+ memcpy(req->response_buf, msg + 3, 4 * datalen);
+ req->response_len = datalen;
+ WRITE_ONCE(req->status, status);
+ found = true;
+ break;
+ }
+ spin_unlock(&ct->lock);
+
+ if (!found)
+ DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
+ return 0;
+}
+
+static void ct_process_request(struct intel_guc_ct *ct,
+ u32 action, u32 len, const u32 *payload)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+
+ CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
+
+ switch (action) {
+ case INTEL_GUC_ACTION_DEFAULT:
+ if (unlikely(len < 1))
+ goto fail_unexpected;
+ intel_guc_to_host_process_recv_msg(guc, *payload);
+ break;
+
+ default:
+fail_unexpected:
+ DRM_ERROR("CT: unexpected request %x %*ph\n",
+ action, 4 * len, payload);
+ break;
+ }
+}
+
+static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
+{
+ unsigned long flags;
+ struct ct_incoming_request *request;
+ u32 header;
+ u32 *payload;
+ bool done;
+
+ spin_lock_irqsave(&ct->lock, flags);
+ request = list_first_entry_or_null(&ct->incoming_requests,
+ struct ct_incoming_request, link);
+ if (request)
+ list_del(&request->link);
+ done = !!list_empty(&ct->incoming_requests);
+ spin_unlock_irqrestore(&ct->lock, flags);
+
+ if (!request)
+ return true;
+
+ header = request->msg[0];
+ payload = &request->msg[1];
+ ct_process_request(ct,
+ ct_header_get_action(header),
+ ct_header_get_len(header),
+ payload);
+
+ kfree(request);
+ return done;
+}
+
+static void ct_incoming_request_worker_func(struct work_struct *w)
+{
+ struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+ bool done;
+
+ done = ct_process_incoming_requests(ct);
+ if (!done)
+ queue_work(system_unbound_wq, &ct->worker);
+}
+
+/**
+ * DOC: CTB GuC to Host request
+ *
+ * Format of the CTB GuC to Host request message is as follows::
+ *
+ * +------------+---------+---------+---------+---------+---------+
+ * | msg[0] | [1] | [2] | [3] | ... | [n-1] |
+ * +------------+---------+---------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+---------+---------+
+ * | | 0 | 1 | 2 | ... | n |
+ * +============+=========+=========+=========+=========+=========+
+ * | len | request specific data |
+ * +------+-----+---------+---------+---------+---------+---------+
+ *
+ * ^-----------------------len-----------------------^
+ */
+
+static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
+{
+ u32 header = msg[0];
+ u32 len = ct_header_get_len(header);
+ u32 msglen = len + 1; /* total message length including header */
+ struct ct_incoming_request *request;
+ unsigned long flags;
+
+ GEM_BUG_ON(ct_header_is_response(header));
+
+ request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+ if (unlikely(!request)) {
+ DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
+ return 0; /* XXX: -ENOMEM ? */
+ }
+ memcpy(request->msg, msg, 4 * msglen);
+
+ spin_lock_irqsave(&ct->lock, flags);
+ list_add_tail(&request->link, &ct->incoming_requests);
+ spin_unlock_irqrestore(&ct->lock, flags);
+
+ queue_work(system_unbound_wq, &ct->worker);
+ return 0;
+}
+
+static void ct_process_host_channel(struct intel_guc_ct *ct)
+{
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+ u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+ int err = 0;
+
+ if (!ctch_is_open(ctch))
+ return;
+
+ do {
+ err = ctb_read(ctb, msg);
+ if (err)
+ break;
+
+ if (ct_header_is_response(msg[0]))
+ err = ct_handle_response(ct, msg);
+ else
+ err = ct_handle_request(ct, msg);
+ } while (!err);
+
+ if (GEM_WARN_ON(err == -EPROTO)) {
+ DRM_ERROR("CT: corrupted message detected!\n");
+ ctb->desc->is_in_error = 1;
+ }
+}
+
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
+{
+ struct intel_guc_ct *ct = &guc->ct;
+
+ ct_process_host_channel(ct);
+}
+
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
* Shall only be called for platforms with HAS_GUC_CT.
- * @guc: the guc
- * return: 0 on success
- * non-zero on failure
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
-int intel_guc_enable_ct(struct intel_guc *guc)
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
int err;
- GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+ GEM_BUG_ON(!HAS_GUC_CT(i915));
err = ctch_open(guc, ctch);
if (unlikely(err))
@@ -435,21 +874,24 @@ int intel_guc_enable_ct(struct intel_guc *guc)
/* Switch into cmd transport buffer based send() */
guc->send = intel_guc_send_ct;
+ guc->handler = intel_guc_to_host_event_handler_ct;
DRM_INFO("CT: %s\n", enableddisabled(true));
return 0;
}
/**
- * Disable buffer based command transport.
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
* Shall only be called for platforms with HAS_GUC_CT.
- * @guc: the guc
*/
-void intel_guc_disable_ct(struct intel_guc *guc)
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
- GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+ GEM_BUG_ON(!HAS_GUC_CT(i915));
if (!ctch_is_open(ctch))
return;
@@ -458,5 +900,6 @@ void intel_guc_disable_ct(struct intel_guc *guc)
/* Disable send */
guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
DRM_INFO("CT: %s\n", enableddisabled(false));
}
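
Both CTB message formats above share one header dword. Per the header layout described in the CTB DOC added to intel_guc_fwif.h below (len in bits 4:0, flags in 15:5, action code in 31:16), packing and unpacking it is plain shift/mask work, mirroring ctb_write() and ct_header_get_len(). A standalone sketch; the constant names and the two flag bit positions are assumptions for illustration, not the driver's macros:

    #include <stdint.h>

    /* Field layout per the CTB DOC: len 4:0, flags 15:5, code 31:16. */
    #define CT_MSG_LEN_SHIFT        0
    #define CT_MSG_LEN_MASK         0x1f
    #define CT_MSG_ACTION_SHIFT     16
    /* Example flag bits in the 15:5 range; real values live in the fwif. */
    #define CT_MSG_WRITE_FENCE      (1 << 8)
    #define CT_MSG_SEND_STATUS      (1 << 9)

    static uint32_t ct_pack_header(uint32_t action, uint32_t len,
                                   int want_status)
    {
            return (len << CT_MSG_LEN_SHIFT) |
                   CT_MSG_WRITE_FENCE |
                   (want_status ? CT_MSG_SEND_STATUS : 0) |
                   (action << CT_MSG_ACTION_SHIFT);
    }

    static uint32_t ct_header_len(uint32_t header)
    {
            return (header >> CT_MSG_LEN_SHIFT) & CT_MSG_LEN_MASK;
    }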
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index 6d97f36fcc62..d774895ab143 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -75,12 +75,22 @@ struct intel_guc_ct_channel {
struct intel_guc_ct {
struct intel_guc_ct_channel host_channel;
/* other channels are tbd */
+
+ /** @lock: protects pending requests list */
+ spinlock_t lock;
+
+ /** @pending_requests: list of requests waiting for response */
+ struct list_head pending_requests;
+
+ /** @incoming_requests: list of incoming requests */
+ struct list_head incoming_requests;
+
+ /** @worker: worker for handling incoming requests */
+ struct work_struct worker;
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
-
-/* XXX: move to intel_uc.h ? don't fit there either */
-int intel_guc_enable_ct(struct intel_guc *guc);
-void intel_guc_disable_ct(struct intel_guc *guc);
+int intel_guc_ct_enable(struct intel_guc_ct *ct);
+void intel_guc_ct_disable(struct intel_guc_ct *ct);
#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index d07f2b985f1c..a9e6fcce467c 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -165,7 +165,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
+ offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -275,9 +275,8 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* Called from intel_uc_init_hw() during driver load, resume from sleep and
* after a GPU reset.
*
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_uc_init_fw(), so here we need to only check that
- * fetch succeeded, and then transfer the image to the h/w.
+ * The firmware image should have already been fetched into memory, so we only
+ * need to check that the fetch succeeded before transferring the image to the h/w.
*
* Return: non-zero code on error
*/
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 6a10aa6f04d3..0867ba76d445 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,9 +23,6 @@
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
-#define GUC_CORE_FAMILY_GEN9 12
-#define GUC_CORE_FAMILY_UNKNOWN 0x7fffffff
-
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
@@ -82,8 +79,6 @@
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
-#define GUC_CTL_GT_TYPE_SHIFT 0
-#define GUC_CTL_CORE_FAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
@@ -127,7 +122,7 @@
#define GUC_PROFILE_ENABLED (1 << 7)
#define GUC_WQ_TRACK_ENABLED (1 << 8)
#define GUC_ADS_ENABLED (1 << 9)
-#define GUC_DEBUG_RESERVED (1 << 10)
+#define GUC_LOG_DEFAULT_DISABLED (1 << 10)
#define GUC_ADS_ADDR_SHIFT 11
#define GUC_ADS_ADDR_MASK 0xfffff800
@@ -327,6 +322,58 @@ struct guc_stage_desc {
u64 desc_private;
} __packed;
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on a u32 data stream written to the shared buffer. One buffer can
+ * be used to transmit data in one direction only (a one-directional channel).
+ *
+ * The current status of each buffer is stored in the buffer descriptor.
+ * The descriptor holds tail and head fields that represent the active data
+ * stream. The tail field is updated by the data producer (sender), and the
+ * head field is updated by the data consumer (receiver)::
+ *
+ * +------------+
+ * | DESCRIPTOR | +=================+============+========+
+ * +============+ | | MESSAGE(s) | |
+ * | address |--------->+=================+============+========+
+ * +------------+
+ * | head | ^-----head--------^
+ * +------------+
+ * | tail | ^---------tail-----------------^
+ * +------------+
+ * | size | ^---------------size--------------------^
+ * +------------+
+ *
+ * Each message in the data stream starts with a single u32 treated as a
+ * header, followed by an optional set of u32 data that makes up the
+ * message specific payload::
+ *
+ * +------------+---------+---------+---------+
+ * | MESSAGE |
+ * +------------+---------+---------+---------+
+ * | msg[0] | [1] | ... | [n-1] |
+ * +------------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+
+ * | | 0 | ... | n |
+ * +======+=====+=========+=========+=========+
+ * | 31:16| code| | | |
+ * +------+-----+ | | |
+ * | 15:5|flags| | | |
+ * +------+-----+ | | |
+ * | 4:0| len| | | |
+ * +------+-----+---------+---------+---------+
+ *
+ * ^-------------len-------------^
+ *
+ * The message header consists of:
+ *
+ * - **len**, indicates length of the message payload (in u32)
+ * - **code**, indicates message code
+ * - **flags**, holds various bits to control message handling
+ */
+
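To make the descriptor fields above concrete, here is a minimal standalone
sketch (helper names are ours, not part of the patch) of the head/tail
arithmetic and of packing a message header with the code/flags/len layout
shown in the diagram; keeping one slot empty so that head == tail means
"empty" is an assumption of the sketch::

	#include <stdint.h>

	struct ctb_desc {	/* mirrors the descriptor diagram above */
		uint32_t head;	/* updated by the consumer (receiver) */
		uint32_t tail;	/* updated by the producer (sender) */
		uint32_t size;	/* buffer size, in u32 units */
	};

	/* u32 slots holding unread data between head and tail */
	static uint32_t ctb_used(const struct ctb_desc *d)
	{
		return d->tail >= d->head ? d->tail - d->head :
					    d->size - d->head + d->tail;
	}

	static uint32_t ctb_space(const struct ctb_desc *d)
	{
		return d->size - ctb_used(d) - 1;
	}

	/* header: code in bits 31:16, flags in 15:5, len in 4:0 */
	static uint32_t ctb_msg_header(uint32_t code, uint32_t flags,
				       uint32_t len)
	{
		return (code << 16) | ((flags & 0x7ff) << 5) | (len & 0x1f);
	}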
/*
* Describes single command transport buffer.
* Used by both guc-master and clients.
@@ -534,16 +581,6 @@ struct guc_log_buffer_state {
u32 version;
} __packed;
-union guc_log_control {
- struct {
- u32 logging_enabled:1;
- u32 reserved1:3;
- u32 verbosity:4;
- u32 reserved2:24;
- };
- u32 value;
-} __packed;
-
struct guc_ctx_report {
u32 report_return_status;
u32 reserved1[64];
@@ -570,7 +607,68 @@ struct guc_shared_ctx_data {
struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
} __packed;
-/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
+/**
+ * DOC: MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC uses software scratch
+ * registers, where the first register holds data treated as the message
+ * header, and the remaining registers hold the message payload.
+ *
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8.
+ *
+ * +-----------+---------+---------+---------+
+ * | MMIO[0] | MMIO[1] | ... | MMIO[n] |
+ * +-----------+---------+---------+---------+
+ * | header | optional payload |
+ * +======+====+=========+=========+=========+
+ * | 31:28|type| | | |
+ * +------+----+ | | |
+ * | 27:16|data| | | |
+ * +------+----+ | | |
+ * | 15:0|code| | | |
+ * +------+----+---------+---------+---------+
+ *
+ * The message header consists of:
+ *
+ * - **type**, indicates message type
+ * - **code**, indicates message code, which is specific to **type**
+ * - **data**, indicates message data, optional, depends on **code**
+ *
+ * The following message **types** are supported:
+ *
+ * - **REQUEST**, indicates a Host-to-GuC request; the requested GuC action
+ *   code must be provided in the **code** field. Optional action specific
+ *   parameters can be provided in the remaining payload registers or in the
+ *   **data** field.
+ *
+ * - **RESPONSE**, indicates a GuC-to-Host response to an earlier GuC request;
+ *   the action response status will be provided in the **code** field.
+ *   Optional response data can be returned in the remaining payload registers
+ *   or in the **data** field.
+ */
+
+#define INTEL_GUC_MSG_TYPE_SHIFT 28
+#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT 16
+#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT 0
+#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+#define __INTEL_GUC_MSG_GET(T, m) \
+ (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
+#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m)
+#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m)
+#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m)
+
+enum intel_guc_msg_type {
+ INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+ INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
+ (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
+#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
+#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
+
enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
@@ -602,24 +700,22 @@ enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
-/*
- * The GuC sends its response to a command by overwriting the
- * command in SS0. The response is distinguishable from a command
- * by the fact that all the MASK bits are set. The remaining bits
- * give more detail.
- */
-#define INTEL_GUC_RECV_MASK ((u32)0xF0000000)
-#define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK)
-#define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x))
-
-/* GUC will return status back to SOFT_SCRATCH_O_REG */
-enum intel_guc_status {
- INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0),
- INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10),
- INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20),
- INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000)
+#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+
+enum intel_guc_response_status {
+ INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
+#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
+ (typecheck(u32, (m)) && \
+ ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
+ ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \
+ (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT)))
+
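As a sanity check of the header packing defined above, this standalone sketch
replicates the masks from this patch and asserts that a RESPONSE header
carrying the SUCCESS status satisfies the same predicate as
INTEL_GUC_MSG_IS_RESPONSE_SUCCESS() (minus the typecheck(); the test harness
itself is ours)::

	#include <assert.h>
	#include <stdint.h>

	#define MSG_TYPE_SHIFT	28
	#define MSG_TYPE_MASK	(0xFu << MSG_TYPE_SHIFT)
	#define MSG_CODE_MASK	0xFFFFu
	#define TYPE_RESPONSE	0xFu
	#define STATUS_SUCCESS	0x0u

	int main(void)
	{
		/* header as GuC writes it back: type=RESPONSE, code=SUCCESS */
		uint32_t msg = (TYPE_RESPONSE << MSG_TYPE_SHIFT) | STATUS_SUCCESS;

		assert((msg & (MSG_TYPE_MASK | MSG_CODE_MASK)) ==
		       ((TYPE_RESPONSE << MSG_TYPE_SHIFT) | STATUS_SUCCESS));

		/* a REQUEST (type 0x0) must not pass the response check */
		assert(((0x0u << MSG_TYPE_SHIFT) & MSG_TYPE_MASK) !=
		       (TYPE_RESPONSE << MSG_TYPE_SHIFT));
		return 0;
	}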
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index c0c2e7d1c7d7..401e1704d61e 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -23,12 +23,11 @@
*/
#include <linux/debugfs.h>
-#include <linux/relay.h>
#include "intel_guc_log.h"
#include "i915_drv.h"
-static void guc_log_capture_logs(struct intel_guc *guc);
+static void guc_log_capture_logs(struct intel_guc_log *log);
/**
* DOC: GuC firmware log
@@ -39,7 +38,7 @@ static void guc_log_capture_logs(struct intel_guc *guc);
* registers value.
*/
-static int guc_log_flush_complete(struct intel_guc *guc)
+static int guc_action_flush_log_complete(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
@@ -48,7 +47,7 @@ static int guc_log_flush_complete(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_log_flush(struct intel_guc *guc)
+static int guc_action_flush_log(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
@@ -58,22 +57,40 @@ static int guc_log_flush(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
+static int guc_action_control_log(struct intel_guc *guc, bool enable,
+ bool default_logging, u32 verbosity)
{
- union guc_log_control control_val = {
- {
- .logging_enabled = enable,
- .verbosity = verbosity,
- },
- };
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
- control_val.value
+ (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
+ (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
+ (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
};
+ GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
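The second element of action[] above packs three independent fields; here is a
standalone sketch of that packing, with the GUC_LOG_CONTROL_* constants copied
from the intel_guc_fwif.h hunk earlier in this patch (the helper name is
ours)::

	#include <stdbool.h>
	#include <stdint.h>

	#define GUC_LOG_CONTROL_LOGGING_ENABLED	(1u << 0)
	#define GUC_LOG_CONTROL_VERBOSITY_SHIFT	4
	#define GUC_LOG_CONTROL_DEFAULT_LOGGING	(1u << 8)

	/* mirrors the second element of action[] in guc_action_control_log() */
	static uint32_t guc_log_control_word(bool enable, bool default_logging,
					     uint32_t verbosity)
	{
		return (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		       (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		       (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0);
	}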
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+ return container_of(log, struct intel_guc, log);
+}
+
+static void guc_log_enable_flush_events(struct intel_guc_log *log)
+{
+ intel_guc_enable_msg(log_to_guc(log),
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+}
+
+static void guc_log_disable_flush_events(struct intel_guc_log *log)
+{
+ intel_guc_disable_msg(log_to_guc(log),
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+}
+
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -121,14 +138,7 @@ static struct dentry *create_buf_file_callback(const char *filename,
if (!parent)
return NULL;
- /*
- * Not using the channel filename passed as an argument, since for each
- * channel relay appends the corresponding CPU number to the filename
- * passed in relay_open(). This should be fine as relay just needs a
- * dentry of the file associated with the channel buffer and that file's
- * name need not be same as the filename passed as an argument.
- */
- buf_file = debugfs_create_file("guc_log", mode,
+ buf_file = debugfs_create_file(filename, mode,
parent, buf, &relay_file_operations);
return buf_file;
}
@@ -149,59 +159,7 @@ static struct rchan_callbacks relay_callbacks = {
.remove_buf_file = remove_buf_file_callback,
};
-static int guc_log_relay_file_create(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct dentry *log_dir;
- int ret;
-
- if (!i915_modparams.guc_log_level)
- return 0;
-
- mutex_lock(&guc->log.runtime.relay_lock);
-
- /* For now create the log file in /sys/kernel/debug/dri/0 dir */
- log_dir = dev_priv->drm.primary->debugfs_root;
-
- /*
- * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
- * not mounted and so can't create the relay file.
- * The relay API seems to fit well with debugfs only, for availing relay
- * there are 3 requirements which can be met for debugfs file only in a
- * straightforward/clean manner :-
- * i) Need the associated dentry pointer of the file, while opening the
- * relay channel.
- * ii) Should be able to use 'relay_file_operations' fops for the file.
- * iii) Set the 'i_private' field of file's inode to the pointer of
- * relay channel buffer.
- */
- if (!log_dir) {
- DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
- ret = -ENODEV;
- goto out_unlock;
- }
-
- ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
- if (ret < 0 && ret != -EEXIST) {
- DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
- goto out_unlock;
- }
-
- ret = 0;
-
-out_unlock:
- mutex_unlock(&guc->log.runtime.relay_lock);
- return ret;
-}
-
-static bool guc_log_has_relay(struct intel_guc *guc)
-{
- lockdep_assert_held(&guc->log.runtime.relay_lock);
-
- return guc->log.runtime.relay_chan != NULL;
-}
-
-static void guc_move_to_next_buf(struct intel_guc *guc)
+static void guc_move_to_next_buf(struct intel_guc_log *log)
{
/*
* Make sure the updates made in the sub buffer are visible when
@@ -209,21 +167,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
*/
smp_wmb();
- if (!guc_log_has_relay(guc))
- return;
-
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size);
/* Switch to the next sub buffer */
- relay_flush(guc->log.runtime.relay_chan);
+ relay_flush(log->relay.channel);
}
-static void *guc_get_write_buffer(struct intel_guc *guc)
+static void *guc_get_write_buffer(struct intel_guc_log *log)
{
- if (!guc_log_has_relay(guc))
- return NULL;
-
/*
* Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
@@ -233,25 +185,25 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
* done without using relay_reserve() along with relay_write(). So its
* better to use relay_reserve() alone.
*/
- return relay_reserve(guc->log.runtime.relay_chan, 0);
+ return relay_reserve(log->relay.channel, 0);
}
-static bool guc_check_log_buf_overflow(struct intel_guc *guc,
+static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
enum guc_log_buffer_type type,
unsigned int full_cnt)
{
- unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
bool overflow = false;
if (full_cnt != prev_full_cnt) {
overflow = true;
- guc->log.prev_overflow_count[type] = full_cnt;
- guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+ log->stats[type].overflow = full_cnt;
+ log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
if (full_cnt < prev_full_cnt) {
/* buffer_full_cnt is a 4 bit counter */
- guc->log.total_overflow_count[type] += 16;
+ log->stats[type].sampled_overflow += 16;
}
DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
}
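buffer_full_cnt is only a 4-bit hardware counter, so the accounting above
assumes at most one wraparound between samples. The same arithmetic in a
standalone sketch (names ours); it relies on unsigned modular arithmetic, so
prev = 14 and full_cnt = 2 yields a delta of 4::

	#include <assert.h>
	#include <stdint.h>

	static uint32_t overflow_delta(uint32_t prev, uint32_t full_cnt)
	{
		uint32_t delta = full_cnt - prev;	/* wraps mod 2^32 */

		if (full_cnt < prev)
			delta += 16;	/* the 4-bit counter wrapped once */
		return delta;
	}

	int main(void)
	{
		assert(overflow_delta(3, 7) == 4);	/* no wrap */
		assert(overflow_delta(14, 2) == 4);	/* wrapped: 2 + 16 - 14 */
		return 0;
	}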
@@ -275,7 +227,7 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-static void guc_read_update_log_buffer(struct intel_guc *guc)
+static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
@@ -284,16 +236,16 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
void *src_data, *dst_data;
bool new_overflow;
- if (WARN_ON(!guc->log.runtime.buf_addr))
- return;
+ mutex_lock(&log->relay.lock);
- /* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.runtime.buf_addr;
+ if (WARN_ON(!intel_guc_log_relay_enabled(log)))
+ goto out_unlock;
- mutex_lock(&guc->log.runtime.relay_lock);
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = log->relay.buf_addr;
/* Get the pointer to local buffer to store the logs */
- log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
if (unlikely(!log_buf_snapshot_state)) {
/*
@@ -301,10 +253,9 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
* getting consumed by User at a slow rate.
*/
DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
- guc->log.capture_miss_count++;
- mutex_unlock(&guc->log.runtime.relay_lock);
+ log->relay.full_count++;
- return;
+ goto out_unlock;
}
/* Actual logs are present from the 2nd page */
@@ -325,8 +276,8 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
- guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
- new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+ log->stats[type].flush += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
@@ -373,38 +324,35 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
dst_data += buffer_size;
}
- guc_move_to_next_buf(guc);
+ guc_move_to_next_buf(log);
- mutex_unlock(&guc->log.runtime.relay_lock);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
}
static void capture_logs_work(struct work_struct *work)
{
- struct intel_guc *guc =
- container_of(work, struct intel_guc, log.runtime.flush_work);
-
- guc_log_capture_logs(guc);
-}
+ struct intel_guc_log *log =
+ container_of(work, struct intel_guc_log, relay.flush_work);
-static bool guc_log_has_runtime(struct intel_guc *guc)
-{
- return guc->log.runtime.buf_addr != NULL;
+ guc_log_capture_logs(log);
}
-static int guc_log_runtime_create(struct intel_guc *guc)
+static int guc_log_map(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&log->relay.lock);
- if (!guc->log.vma)
+ if (!log->vma)
return -ENODEV;
- GEM_BUG_ON(guc_log_has_runtime(guc));
-
- ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
return ret;
@@ -413,49 +361,40 @@ static int guc_log_runtime_create(struct intel_guc *guc)
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
return PTR_ERR(vaddr);
}
- guc->log.runtime.buf_addr = vaddr;
+ log->relay.buf_addr = vaddr;
return 0;
}
-static void guc_log_runtime_destroy(struct intel_guc *guc)
+static void guc_log_unmap(struct intel_guc_log *log)
{
- /*
- * It's possible that the runtime stuff was never allocated because
- * GuC log was disabled at the boot time.
- */
- if (!guc_log_has_runtime(guc))
- return;
+ lockdep_assert_held(&log->relay.lock);
- i915_gem_object_unpin_map(guc->log.vma->obj);
- guc->log.runtime.buf_addr = NULL;
+ i915_gem_object_unpin_map(log->vma->obj);
+ log->relay.buf_addr = NULL;
}
-void intel_guc_log_init_early(struct intel_guc *guc)
+void intel_guc_log_init_early(struct intel_guc_log *log)
{
- mutex_init(&guc->log.runtime.relay_lock);
- INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+ mutex_init(&log->relay.lock);
+ INIT_WORK(&log->relay.flush_work, capture_logs_work);
}
-int intel_guc_log_relay_create(struct intel_guc *guc)
+static int guc_log_relay_create(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
- if (!i915_modparams.guc_log_level)
- return 0;
-
- mutex_lock(&guc->log.runtime.relay_lock);
-
- GEM_BUG_ON(guc_log_has_relay(guc));
+ lockdep_assert_held(&log->relay.lock);
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = GUC_LOG_SIZE;
@@ -468,157 +407,56 @@ int intel_guc_log_relay_create(struct intel_guc *guc)
*/
n_subbufs = 8;
- /*
- * Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs, the channel will be linked with a file
- * later on when debugfs is registered.
- */
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
+ guc_log_relay_chan = relay_open("guc_log",
+ dev_priv->drm.primary->debugfs_root,
+ subbuf_size, n_subbufs,
+ &relay_callbacks, dev_priv);
if (!guc_log_relay_chan) {
DRM_ERROR("Couldn't create relay chan for GuC logging\n");
ret = -ENOMEM;
- goto err;
+ return ret;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.runtime.relay_chan = guc_log_relay_chan;
-
- mutex_unlock(&guc->log.runtime.relay_lock);
+ log->relay.channel = guc_log_relay_chan;
return 0;
-
-err:
- mutex_unlock(&guc->log.runtime.relay_lock);
- /* logging will be off */
- i915_modparams.guc_log_level = 0;
- return ret;
-}
-
-void intel_guc_log_relay_destroy(struct intel_guc *guc)
-{
- mutex_lock(&guc->log.runtime.relay_lock);
-
- /*
- * It's possible that the relay was never allocated because
- * GuC log was disabled at the boot time.
- */
- if (!guc_log_has_relay(guc))
- goto out_unlock;
-
- relay_close(guc->log.runtime.relay_chan);
- guc->log.runtime.relay_chan = NULL;
-
-out_unlock:
- mutex_unlock(&guc->log.runtime.relay_lock);
}
-static int guc_log_late_setup(struct intel_guc *guc)
+static void guc_log_relay_destroy(struct intel_guc_log *log)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- if (!guc_log_has_runtime(guc)) {
- /*
- * If log was disabled at boot time, then setup needed to handle
- * log buffer flush interrupts would not have been done yet, so
- * do that now.
- */
- ret = intel_guc_log_relay_create(guc);
- if (ret)
- goto err;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
- ret = guc_log_runtime_create(guc);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- if (ret)
- goto err_relay;
- }
-
- ret = guc_log_relay_file_create(guc);
- if (ret)
- goto err_runtime;
-
- return 0;
+ lockdep_assert_held(&log->relay.lock);
-err_runtime:
- mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_runtime_destroy(guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-err_relay:
- intel_guc_log_relay_destroy(guc);
-err:
- /* logging will remain off */
- i915_modparams.guc_log_level = 0;
- return ret;
+ relay_close(log->relay.channel);
+ log->relay.channel = NULL;
}
-static void guc_log_capture_logs(struct intel_guc *guc)
+static void guc_log_capture_logs(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- guc_read_update_log_buffer(guc);
+ guc_read_update_log_buffer(log);
/*
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
- guc_log_flush_complete(guc);
- intel_runtime_pm_put(dev_priv);
-}
-
-static void guc_flush_logs(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
- return;
-
- /* First disable the interrupts, will be renabled afterwards */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
- gen9_disable_guc_interrupts(dev_priv);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- /*
- * Before initiating the forceful flush, wait for any pending/ongoing
- * flush to complete otherwise forceful flush may not actually happen.
- */
- flush_work(&guc->log.runtime.flush_work);
-
- /* Ask GuC to update the log buffer state */
- intel_runtime_pm_get(dev_priv);
- guc_log_flush(guc);
+ guc_action_flush_log_complete(guc);
intel_runtime_pm_put(dev_priv);
-
- /* GuC would have updated log buffer by now, so capture it */
- guc_log_capture_logs(guc);
}
-int intel_guc_log_create(struct intel_guc *guc)
+int intel_guc_log_create(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
unsigned long offset;
u32 flags;
int ret;
- GEM_BUG_ON(guc->log.vma);
-
- /*
- * We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submisssions.
- */
- if (WARN_ON(!i915_has_memcpy_from_wc())) {
- ret = -EINVAL;
- goto err;
- }
+ GEM_BUG_ON(log->vma);
vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
if (IS_ERR(vma)) {
@@ -626,13 +464,7 @@ int intel_guc_log_create(struct intel_guc *guc)
goto err;
}
- guc->log.vma = vma;
-
- if (i915_modparams.guc_log_level) {
- ret = guc_log_runtime_create(guc);
- if (ret < 0)
- goto err_vma;
- }
+ log->vma = vma;
/* each allocated unit is a page */
flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
@@ -640,117 +472,159 @@ int intel_guc_log_create(struct intel_guc *guc)
(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
- offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
+ log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
return 0;
-err_vma:
- i915_vma_unpin_and_release(&guc->log.vma);
err:
/* logging will be off */
i915_modparams.guc_log_level = 0;
return ret;
}
-void intel_guc_log_destroy(struct intel_guc *guc)
+void intel_guc_log_destroy(struct intel_guc_log *log)
+{
+ i915_vma_unpin_and_release(&log->vma);
+}
+
+int intel_guc_log_level_get(struct intel_guc_log *log)
{
- guc_log_runtime_destroy(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
+ GEM_BUG_ON(!log->vma);
+ GEM_BUG_ON(i915_modparams.guc_log_level < 0);
+
+ return i915_modparams.guc_log_level;
}
-int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
+int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
{
+ struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- bool enable_logging = control_val > 0;
- u32 verbosity;
int ret;
- if (!guc->log.vma)
- return -ENODEV;
+ BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
+ GEM_BUG_ON(!log->vma);
+ GEM_BUG_ON(i915_modparams.guc_log_level < 0);
- BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
- if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
+ /*
+ * GuC recognizes log levels from 0 up to max; we use 0 to indicate
+ * that logging should be disabled.
+ */
+ if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- /* This combination doesn't make sense & won't have any effect */
- if (!enable_logging && !i915_modparams.guc_log_level)
- return 0;
+ mutex_lock(&dev_priv->drm.struct_mutex);
- verbosity = enable_logging ? control_val - 1 : 0;
+ if (i915_modparams.guc_log_level == val) {
+ ret = 0;
+ goto out_unlock;
+ }
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
- ret = guc_log_control(guc, enable_logging, verbosity);
+ ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
+ GUC_LOG_LEVEL_IS_ENABLED(val),
+ GUC_LOG_LEVEL_TO_VERBOSITY(val));
intel_runtime_pm_put(dev_priv);
+ if (ret) {
+ DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
+ goto out_unlock;
+ }
+
+ i915_modparams.guc_log_level = val;
+
+out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (ret < 0) {
- DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
- return ret;
- }
+ return ret;
+}
- if (enable_logging) {
- i915_modparams.guc_log_level = 1 + verbosity;
+bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+{
+ return log->relay.buf_addr;
+}
- /*
- * If log was disabled at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would
- * not have been enabled. Try again now, just in case.
- */
- ret = guc_log_late_setup(guc);
- if (ret < 0) {
- DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
- return ret;
- }
+int intel_guc_log_relay_open(struct intel_guc_log *log)
+{
+ int ret;
- /* GuC logging is currently the only user of Guc2Host interrupts */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
- gen9_enable_guc_interrupts(dev_priv);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- } else {
- /*
- * Once logging is disabled, GuC won't generate logs & send an
- * interrupt. But there could be some data in the log buffer
- * which is yet to be captured. So request GuC to update the log
- * buffer state and then collect the left over logs.
- */
- guc_flush_logs(guc);
+ mutex_lock(&log->relay.lock);
- /* As logging is disabled, update log level to reflect that */
- i915_modparams.guc_log_level = 0;
+ if (intel_guc_log_relay_enabled(log)) {
+ ret = -EEXIST;
+ goto out_unlock;
}
- return ret;
-}
+ /*
+ * We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC-based
+ * submissions.
+ */
+ if (!i915_has_memcpy_from_wc()) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
-void i915_guc_log_register(struct drm_i915_private *dev_priv)
-{
- if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
- return;
+ ret = guc_log_relay_create(log);
+ if (ret)
+ goto out_unlock;
+
+ ret = guc_log_map(log);
+ if (ret)
+ goto out_relay;
- guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&log->relay.lock);
+
+ guc_log_enable_flush_events(log);
+
+ /*
+ * When GuC is logging without us relaying to userspace, we're ignoring
+ * the flush notification. This means that we need to unconditionally
+ * flush on relay enabling, since GuC only notifies us once.
+ */
+ queue_work(log->relay.flush_wq, &log->relay.flush_work);
+
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
}
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
+void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ /*
+ * Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete; otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&log->relay.flush_work);
- if (!USES_GUC_SUBMISSION(dev_priv))
- return;
+ intel_runtime_pm_get(i915);
+ guc_action_flush_log(guc);
+ intel_runtime_pm_put(i915);
- mutex_lock(&dev_priv->drm.struct_mutex);
- /* GuC logging is currently the only user of Guc2Host interrupts */
- intel_runtime_pm_get(dev_priv);
- gen9_disable_guc_interrupts(dev_priv);
- intel_runtime_pm_put(dev_priv);
+ /* GuC would have updated log buffer by now, so capture it */
+ guc_log_capture_logs(log);
+}
- guc_log_runtime_destroy(guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_disable_flush_events(log);
+ flush_work(&log->relay.flush_work);
+
+ mutex_lock(&log->relay.lock);
+ GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
+ guc_log_unmap(log);
+ guc_log_relay_destroy(log);
+ mutex_unlock(&log->relay.lock);
+}
- intel_guc_log_relay_destroy(guc);
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
+{
+ queue_work(log->relay.flush_wq, &log->relay.flush_work);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index dab0e949567a..fa80535a6f9d 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -25,11 +25,12 @@
#ifndef _INTEL_GUC_LOG_H_
#define _INTEL_GUC_LOG_H_
+#include <linux/mutex.h>
+#include <linux/relay.h>
#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
-struct drm_i915_private;
struct intel_guc;
/*
@@ -39,33 +40,53 @@ struct intel_guc;
#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+/*
+ * While we're using a plain log level in i915, GuC controls are much more...
+ * "elaborate"? We have a couple of bits for verbosity, a separate bit for
+ * actual log enabling, and a separate bit for default logging - which
+ * "conveniently" ignores the enable bit.
+ */
+#define GUC_LOG_LEVEL_DISABLED 0
+#define GUC_LOG_LEVEL_NON_VERBOSE 1
+#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED)
+#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE)
+#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \
+ typeof(x) _x = (x); \
+ GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \
+})
+#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
+#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
+
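Spelled out, the mapping implied by these macros is: level 0 disabled, level 1
enabled but non-verbose, and levels 2..GUC_LOG_LEVEL_MAX map to GuC verbosity
0..GUC_LOG_VERBOSITY_MAX. A standalone sketch (macro names shortened, and
GUC_LOG_VERBOSITY_MAX assumed to be 3 per its definition elsewhere in
intel_guc_fwif.h)::

	#include <assert.h>

	#define IS_ENABLED(x)	((x) > 0)
	#define IS_VERBOSE(x)	((x) > 1)
	#define TO_VERBOSITY(x)	(IS_VERBOSE(x) ? (x) - 2 : 0)

	int main(void)
	{
		assert(!IS_ENABLED(0));				/* disabled */
		assert(IS_ENABLED(1) && !IS_VERBOSE(1));	/* non-verbose */
		assert(TO_VERBOSITY(2) == 0);			/* min verbosity */
		assert(TO_VERBOSITY(5) == 3);			/* GUC_LOG_LEVEL_MAX */
		return 0;
	}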
struct intel_guc_log {
u32 flags;
struct i915_vma *vma;
- /* The runtime stuff gets created only when GuC logging gets enabled */
struct {
void *buf_addr;
struct workqueue_struct *flush_wq;
struct work_struct flush_work;
- struct rchan *relay_chan;
- /* To serialize the access to relay_chan */
- struct mutex relay_lock;
- } runtime;
+ struct rchan *channel;
+ struct mutex lock;
+ u32 full_count;
+ } relay;
/* logging related stats */
- u32 capture_miss_count;
- u32 flush_interrupt_count;
- u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 flush_count[GUC_MAX_LOG_BUFFER];
+ struct {
+ u32 sampled_overflow;
+ u32 overflow;
+ u32 flush;
+ } stats[GUC_MAX_LOG_BUFFER];
};
-int intel_guc_log_create(struct intel_guc *guc);
-void intel_guc_log_destroy(struct intel_guc *guc);
-void intel_guc_log_init_early(struct intel_guc *guc);
-int intel_guc_log_relay_create(struct intel_guc *guc);
-void intel_guc_log_relay_destroy(struct intel_guc *guc);
-int intel_guc_log_control(struct intel_guc *guc, u64 control_val);
-void i915_guc_log_register(struct drm_i915_private *dev_priv);
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
+void intel_guc_log_init_early(struct intel_guc_log *log);
+int intel_guc_log_create(struct intel_guc_log *log);
+void intel_guc_log_destroy(struct intel_guc_log *log);
+
+int intel_guc_log_level_get(struct intel_guc_log *log);
+int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
+int intel_guc_log_relay_open(struct intel_guc_log *log);
+void intel_guc_log_relay_flush(struct intel_guc_log *log);
+void intel_guc_log_relay_close(struct intel_guc_log *log);
+
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 19a9247c5664..d86084742a4a 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -66,22 +66,20 @@
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
+#define GUC_WOPCM_OFFSET_VALID (1<<0)
#define HUC_LOADING_AGENT_VCR (0<<1)
#define HUC_LOADING_AGENT_GUC (1<<1)
-#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
+#define GUC_WOPCM_OFFSET_SHIFT 14
+#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT)
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
#define HUC_STATUS2 _MMIO(0xD3B0)
#define HUC_FW_VERIFIED (1<<7)
-/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
-/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
-#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
-
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP 0xFEE00000
+#define GUC_WOPCM_SIZE_LOCKED (1<<0)
+#define GUC_WOPCM_SIZE_SHIFT 12
+#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT)
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8a8ad2fe158d..2feb65096966 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -124,9 +124,17 @@ static int reserve_doorbell(struct intel_guc_client *client)
return 0;
}
+static bool has_doorbell(struct intel_guc_client *client)
+{
+ if (client->doorbell_id == GUC_DOORBELL_INVALID)
+ return false;
+
+ return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
static void unreserve_doorbell(struct intel_guc_client *client)
{
- GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+ GEM_BUG_ON(!has_doorbell(client));
__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
client->doorbell_id = GUC_DOORBELL_INVALID;
@@ -184,14 +192,6 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
return client->vaddr + client->doorbell_offset;
}
-static bool has_doorbell(struct intel_guc_client *client)
-{
- if (client->doorbell_id == GUC_DOORBELL_INVALID)
- return false;
-
- return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
-}
-
static void __create_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
@@ -207,7 +207,6 @@ static void __destroy_doorbell(struct intel_guc_client *client)
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
-
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
doorbell->cookie = 0;
@@ -224,6 +223,9 @@ static int create_doorbell(struct intel_guc_client *client)
{
int ret;
+ if (WARN_ON(!has_doorbell(client)))
+ return -ENODEV; /* internal setup error, should never happen */
+
__update_doorbell_desc(client, client->doorbell_id);
__create_doorbell(client);
@@ -231,8 +233,8 @@ static int create_doorbell(struct intel_guc_client *client)
if (ret) {
__destroy_doorbell(client);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- DRM_ERROR("Couldn't create client %u doorbell: %d\n",
- client->stage_id, ret);
+ DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
+ client->stage_id, ret);
return ret;
}
@@ -362,7 +364,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -386,8 +388,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- lrc->ring_lrca =
- guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
+ LRC_STATE_PN * PAGE_SIZE;
/* XXX: In direct submission, the GuC wants the HW context id
* here. In proxy submission, it wants the stage id
@@ -395,7 +397,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
- lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
+ lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
@@ -411,7 +413,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
- gfx_addr = guc_ggtt_offset(client->vma);
+ gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
@@ -584,7 +586,7 @@ static void inject_preempt_context(struct work_struct *work)
data[3] = engine->guc_id;
data[4] = guc->execbuf_client->priority;
data[5] = guc->execbuf_client->stage_id;
- data[6] = guc_ggtt_offset(guc->shared_data);
+ data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
execlists_clear_active(&engine->execlists,
@@ -657,7 +659,17 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
port_set(port, i915_request_get(rq));
}
-static void guc_dequeue(struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->sched.attr.priority;
+}
+
+static inline int port_prio(const struct execlist_port *port)
+{
+ return rq_prio(port_request(port));
+}
+
+static bool __guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -667,28 +679,29 @@ static void guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
struct rb_node *rb;
- spin_lock_irq(&engine->timeline->lock);
+ lockdep_assert_held(&engine->timeline.lock);
+
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
if (port_isset(port)) {
- if (engine->i915->preempt_context) {
+ if (intel_engine_has_preemption(engine)) {
struct guc_preempt_work *preempt_work =
&engine->i915->guc.preempt_work[engine->id];
+ int prio = execlists->queue_priority;
- if (execlists->queue_priority >
- max(port_request(port)->priotree.priority, 0)) {
+ if (__execlists_need_preempt(prio, port_prio(port))) {
execlists_set_active(execlists,
EXECLISTS_ACTIVE_PREEMPT);
queue_work(engine->i915->guc.preempt_wq,
&preempt_work->work);
- goto unlock;
+ return false;
}
}
port++;
if (port_isset(port))
- goto unlock;
+ return false;
}
GEM_BUG_ON(port_isset(port));
@@ -696,11 +709,11 @@ static void guc_dequeue(struct intel_engine_cs *engine)
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
if (last && rq->ctx != last->ctx) {
if (port == last_port) {
__list_del_many(&p->requests,
- &rq->priotree.link);
+ &rq->sched.link);
goto done;
}
@@ -709,7 +722,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
port++;
}
- INIT_LIST_HEAD(&rq->priotree.link);
+ INIT_LIST_HEAD(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
@@ -726,19 +739,34 @@ static void guc_dequeue(struct intel_engine_cs *engine)
done:
execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
execlists->first = rb;
- if (submit) {
+ if (submit)
port_assign(port, last);
- execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
- guc_submit(engine);
- }
+ if (last)
+ execlists_user_begin(execlists, execlists->port);
/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(port_isset(execlists->port) &&
!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
-unlock:
- spin_unlock_irq(&engine->timeline->lock);
+ return submit;
+}
+
+static void guc_dequeue(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+ bool submit;
+
+ local_irq_save(flags);
+
+ spin_lock(&engine->timeline.lock);
+ submit = __guc_dequeue(engine);
+ spin_unlock(&engine->timeline.lock);
+
+ if (submit)
+ guc_submit(engine);
+
+ local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
@@ -748,17 +776,20 @@ static void guc_submission_tasklet(unsigned long data)
struct execlist_port *port = execlists->port;
struct i915_request *rq;
- rq = port_request(&port[0]);
+ rq = port_request(port);
while (rq && i915_request_completed(rq)) {
trace_i915_request_out(rq);
i915_request_put(rq);
- execlists_port_complete(execlists, port);
-
- rq = port_request(&port[0]);
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port)) {
+ execlists_user_begin(execlists, port);
+ rq = port_request(port);
+ } else {
+ execlists_user_end(execlists);
+ rq = NULL;
+ }
}
- if (!rq)
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
@@ -977,7 +1008,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce = &client->owner->engine[id];
+ struct intel_context *ce =
+ to_intel_context(client->owner, engine);
u32 addr = intel_hws_preempt_done_address(engine);
u32 *cs;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index c8ea510629fa..d47e346bd49e 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,9 +246,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, BIT(engine->id),
- "Kicking stuck wait on %s",
- engine->name);
+ i915_handle_error(dev_priv, BIT(engine->id), 0,
+ "stuck wait on %s", engine->name);
I915_WRITE_CTL(engine, tmp);
return ENGINE_WAIT_KICK;
}
@@ -258,8 +257,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
default:
return ENGINE_DEAD;
case 1:
- i915_handle_error(dev_priv, ALL_ENGINES,
- "Kicking stuck semaphore on %s",
+ i915_handle_error(dev_priv, ALL_ENGINES, 0,
+ "stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return ENGINE_WAIT_KICK;
@@ -357,7 +356,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
break;
case ENGINE_DEAD:
- if (drm_debug & DRM_UT_DRIVER) {
+ if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer("hangcheck");
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -386,13 +385,13 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
if (stuck != hung)
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
+ "%s on ", stuck == hung ? "no progress" : "hang");
for_each_engine_masked(engine, i915, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
- return i915_handle_error(i915, hung, "%s", msg);
+ return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
}
/*
@@ -453,6 +452,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+ engine->hangcheck.action_timestamp = jiffies;
}
void intel_hangcheck_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 14ca5d3057a7..0cc6a861bcf8 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -37,6 +37,43 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
return 0;
}
+static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ enum i915_power_well_id id;
+ bool enabled = false;
+
+ /*
+ * On HSW and BDW, the display HW loads the key as soon as the display resumes.
+ * On all BXT+ platforms, SW can load the keys only when PW#1 is turned on.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = HSW_DISP_PW_GLOBAL;
+ else
+ id = SKL_DISP_PW_1;
+
+ mutex_lock(&power_domains->lock);
+
+ /* PG1 (power well #1) needs to be enabled */
+ for_each_power_well(dev_priv, power_well) {
+ if (power_well->id == id) {
+ enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ break;
+ }
+ }
+ mutex_unlock(&power_domains->lock);
+
+ /*
+ * Another requirement for HDCP key loadability is that the PLL for
+ * cdclk is enabled. Without an active CRTC we won't land here, so we
+ * assume that cdclk is already on.
+ */
+
+ return enabled;
+}
+
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
@@ -142,53 +179,17 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
return true;
}
-/* Implements Part 2 of the HDCP authorization procedure */
static
-int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
- const struct intel_hdcp_shim *shim)
+int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim,
+ u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
struct drm_i915_private *dev_priv;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
- u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, j, sha_idx;
dev_priv = intel_dig_port->base.base.dev->dev_private;
- ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
- if (ret) {
- DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
- return ret;
- }
-
- ret = shim->read_bstatus(intel_dig_port, bstatus);
- if (ret)
- return ret;
-
- if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
- DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_ERROR("Max Topology Limit Exceeded\n");
- return -EPERM;
- }
-
- /*
- * When repeater reports 0 device count, HDCP1.4 spec allows disabling
- * the HDCP encryption. That implies that repeater can't have its own
- * display. As there is no consumption of encrypted content in the
- * repeater with 0 downstream devices, we are failing the
- * authentication.
- */
- num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
- if (num_downstream == 0)
- return -EINVAL;
-
- ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
- if (!ksv_fifo)
- return -ENOMEM;
-
- ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
- if (ret)
- return ret;
-
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
@@ -353,7 +354,8 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
+ DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
@@ -381,17 +383,83 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
+ DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
+ return 0;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ u8 bstatus[2], num_downstream, *ksv_fifo;
+ int ret, i, tries = 3;
+
+ ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+ if (ret) {
+ DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ return ret;
+ }
+
+ ret = shim->read_bstatus(intel_dig_port, bstatus);
+ if (ret)
+ return ret;
+
+ if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+ DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+ DRM_ERROR("Max Topology Limit Exceeded\n");
+ return -EPERM;
+ }
+
+ /*
+ * When a repeater reports a device count of 0, the HDCP 1.4 spec allows
+ * disabling HDCP encryption. That implies the repeater can't have its
+ * own display. As there is no consumption of encrypted content in a
+ * repeater with 0 downstream devices, we fail the authentication.
+ */
+ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+ if (num_downstream == 0)
+ return -EINVAL;
+
+ ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
+ if (!ksv_fifo)
+ return -ENOMEM;
+
+ ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+ if (ret)
+ goto err;
+
+ /*
+ * When V prime mismatches, the DP spec mandates re-reading
+ * V prime at least twice.
+ */
+ for (i = 0; i < tries; i++) {
+ ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+ ksv_fifo, num_downstream,
+ bstatus);
+ if (!ret)
+ break;
+ }
+
+ if (i == tries) {
+ DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+ goto err;
+ }
+
DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
num_downstream);
- return 0;
+ ret = 0;
+err:
+ kfree(ksv_fifo);
+ return ret;
}
/* Implements Part 1 of the HDCP authorization procedure */
@@ -506,15 +574,26 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
*/
wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
- ri.reg = 0;
- ret = shim->read_ri_prime(intel_dig_port, ri.shim);
- if (ret)
- return ret;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ tries = 3;
- /* Wait for Ri prime match */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ /*
+ * The DP HDCP spec mandates two more attempts to read R0 in case
+ * of an R0 mismatch.
+ */
+ for (i = 0; i < tries; i++) {
+ ri.reg = 0;
+ ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+ if (ret)
+ return ret;
+ I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ break;
+ }
+
+ if (i == tries) {
DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
I915_READ(PORT_HDCP_STATUS(port)));
return -ETIMEDOUT;
@@ -580,8 +659,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
connector->base.name, connector->base.base.id);
- if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
- DRM_ERROR("PG1 is disabled, cannot load keys\n");
+ if (!hdcp_key_loadable(dev_priv)) {
+ DRM_ERROR("HDCP key Load is not possible\n");
return -ENXIO;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1baef4ac7ecb..d8cb53ef4351 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
clock = mode->clock;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
@@ -2082,41 +2088,33 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* it enables scrambling. This should be called before enabling the HDMI
* 2.0 port, as the sink can choose to disable the scrambling if it doesn't
* detect a scrambled clock within 100 ms.
+ *
+ * Returns:
+ * True on success, false on failure.
*/
-void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
struct drm_connector *connector,
bool high_tmds_clock_ratio,
bool scrambling)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_scrambling *sink_scrambling =
- &connector->display_info.hdmi.scdc.scrambling;
- struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus);
- bool ret;
+ &connector->display_info.hdmi.scdc.scrambling;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
if (!sink_scrambling->supported)
- return;
-
- DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
- encoder->base.name, connector->name);
+ return true;
- /* Set TMDS bit clock ratio to 1/40 or 1/10 */
- ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
- if (!ret) {
- DRM_ERROR("Set TMDS ratio failed\n");
- return;
- }
-
- /* Enable/disable sink scrambling */
- ret = drm_scdc_set_scrambling(adptr, scrambling);
- if (!ret) {
- DRM_ERROR("Set sink scrambling failed\n");
- return;
- }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+ connector->base.id, connector->name,
+ yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
- DRM_DEBUG_KMS("sink scrambling handled\n");
+ /* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
+ return drm_scdc_set_high_tmds_clock_ratio(adapter,
+ high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(adapter, scrambling);
}
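
The reworked helper now reports success to its caller: an unsupported sink is trivially successful, and otherwise both SCDC writes must succeed, with the TMDS clock ratio programmed first. A sketch of that contract, using scdc_set_* as hypothetical stand-ins for the drm_scdc_* helpers:

    #include <stdbool.h>
    #include <stdio.h>

    static bool scdc_set_tmds_ratio_40(bool on) { (void)on; return true; }
    static bool scdc_set_scrambling(bool on) { (void)on; return true; }

    static bool handle_sink_scrambling(bool supported, bool ratio40,
                                       bool scramble)
    {
        if (!supported)
            return true; /* nothing to do counts as success */

        /*
         * && short-circuits: scrambling is never touched if the
         * clock-ratio write fails, preserving the required ordering.
         */
        return scdc_set_tmds_ratio_40(ratio40) &&
               scdc_set_scrambling(scramble);
    }

    int main(void)
    {
        printf("%d\n", handle_sink_scrambling(true, true, true)); /* 1 */
        return 0;
    }
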
static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 0e3d3e89d66a..43aa92beff2a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -100,6 +100,8 @@ enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
if (IS_CNL_WITH_PORT_F(dev_priv))
return PORT_F;
return PORT_E;
+ case HPD_PORT_F:
+ return PORT_F;
default:
return PORT_NONE; /* no port for this pin */
}
@@ -132,6 +134,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
case PORT_F:
if (IS_CNL_WITH_PORT_F(dev_priv))
return HPD_PORT_E;
+ return HPD_PORT_F;
default:
MISSING_CASE(port);
return HPD_NONE;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 65e2afb9b955..291285277403 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -55,7 +55,7 @@ int intel_huc_auth(struct intel_huc *huc)
return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
@@ -63,7 +63,8 @@ int intel_huc_auth(struct intel_huc *huc)
}
ret = intel_guc_auth_huc(guc,
- guc_ggtt_offset(vma) + huc->fw.rsa_offset);
+ intel_guc_ggtt_offset(guc, vma) +
+ huc->fw.rsa_offset);
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail_unpin;
@@ -91,3 +92,28 @@ fail:
DRM_ERROR("HuC: Authentication failed %d\n", ret);
return ret;
}
+
+/**
+ * intel_huc_check_status() - check HuC status
+ * @huc: intel_huc structure
+ *
+ * This function reads the status register to verify whether the HuC
+ * firmware was successfully loaded.
+ *
+ * Returns a positive value if the HuC firmware is loaded and verified,
+ * and -ENODEV if HuC is not present.
+ */
+int intel_huc_check_status(struct intel_huc *huc)
+{
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ u32 status;
+
+ if (!HAS_HUC(dev_priv))
+ return -ENODEV;
+
+ intel_runtime_pm_get(dev_priv);
+ status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ intel_runtime_pm_put(dev_priv);
+
+ return status;
+}
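
The read is bracketed by a runtime-PM get/put so the device stays awake only for the single register access. A userspace model of that bracket; rpm_get/rpm_put/read_reg and the FW_VERIFIED bit position are assumptions standing in for the real helpers:

    #include <stdio.h>

    #define FW_VERIFIED (1u << 7)  /* assumed bit, mirrors HUC_FW_VERIFIED */

    static void rpm_get(void) { /* wake the device */ }
    static void rpm_put(void) { /* allow it to sleep again */ }
    static unsigned int read_reg(void) { return FW_VERIFIED; }

    static int check_status(int has_huc)
    {
        unsigned int status;

        if (!has_huc)
            return -19;                  /* -ENODEV */

        rpm_get();                       /* keep the device awake... */
        status = read_reg() & FW_VERIFIED;
        rpm_put();                       /* ...only for the read itself */

        return (int)status;              /* positive iff verified */
    }

    int main(void)
    {
        printf("%d\n", check_status(1)); /* prints 128 */
        return 0;
    }
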
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 5d6e804f9771..aa854907abac 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -37,5 +37,12 @@ struct intel_huc {
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_check_status(struct intel_huc *huc);
+
+static inline int intel_huc_sanitize(struct intel_huc *huc)
+{
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
#endif
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index c66afa9b989a..f93d2384d482 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -118,7 +118,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Set the source address for the uCode */
- offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+ offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
+ huc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -154,9 +155,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
* Called from intel_uc_init_hw() during driver load, resume from sleep and
* after a GPU reset. Note that HuC must be loaded before GuC.
*
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_uc_init_fw(), so here we need to only check that
- * fetch succeeded, and then transfer the image to the h/w.
+ * The firmware image should have already been fetched into memory, so we
+ * only check that the fetch succeeded before transferring the image to the h/w.
*
* Return: non-zero code on error
*/
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8704f7f8d072..7c4c8fb1dae4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -139,6 +139,7 @@
#include "i915_gem_render_state.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
+#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
@@ -176,14 +177,16 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->priotree.priority;
+ return rq->sched.attr.priority;
}
static inline bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *last,
int prio)
{
- return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
+ return (intel_engine_has_preemption(engine) &&
+ __execlists_need_preempt(prio, rq_prio(last)) &&
+ !i915_request_completed(last));
}
/**
@@ -221,7 +224,7 @@ static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -255,9 +258,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
}
static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_priolist *p;
@@ -328,10 +329,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
struct i915_priolist *uninitialized_var(p);
int last_prio = I915_PRIORITY_INVALID;
- lockdep_assert_held(&engine->timeline->lock);
+ lockdep_assert_held(&engine->timeline.lock);
list_for_each_entry_safe_reverse(rq, rn,
- &engine->timeline->requests,
+ &engine->timeline.requests,
link) {
if (i915_request_completed(rq))
return;
@@ -342,10 +343,11 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != last_prio) {
last_prio = rq_prio(rq);
- p = lookup_priolist(engine, &rq->priotree, last_prio);
+ p = lookup_priolist(engine, last_prio);
}
- list_add(&rq->priotree.link, &p->requests);
+ GEM_BUG_ON(p->priority != rq_prio(rq));
+ list_add(&rq->sched.link, &p->requests);
}
}
@@ -354,10 +356,13 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock_irq(&engine->timeline->lock);
__unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->timeline->lock);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static inline void
@@ -374,6 +379,19 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+inline void
+execlists_user_begin(struct intel_engine_execlists *execlists,
+ const struct execlist_port *port)
+{
+ execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+inline void
+execlists_user_end(struct intel_engine_execlists *execlists)
+{
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
@@ -382,10 +400,11 @@ execlists_context_schedule_in(struct i915_request *rq)
}
static inline void
-execlists_context_schedule_out(struct i915_request *rq)
+execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
intel_engine_context_out(rq->engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ execlists_context_status_change(rq, status);
+ trace_i915_request_out(rq);
}
static void
@@ -399,7 +418,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+ struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
struct i915_hw_ppgtt *ppgtt =
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
@@ -454,10 +473,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
+ intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
GEM_BUG_ON(!n);
@@ -506,7 +527,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
struct intel_context *ce =
- &engine->i915->preempt_context->engine[engine->id];
+ to_intel_context(engine->i915->preempt_context, engine);
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -535,7 +556,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool __execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -545,6 +566,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
+ lockdep_assert_held(&engine->timeline.lock);
+
/* Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
@@ -566,7 +589,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
@@ -581,7 +603,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
- goto unlock;
+ return false;
/*
* If we write to ELSP a second time before the HW has had
@@ -591,11 +613,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the HW to indicate that it has had a chance to respond.
*/
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- goto unlock;
+ return false;
if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- goto unlock;
+ return false;
}
/*
@@ -620,7 +642,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 * priorities of the ports haven't been switched.
*/
if (port_count(&port[1]))
- goto unlock;
+ return false;
/*
* WaIdleLiteRestore:bdw,skl
@@ -637,7 +659,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -657,7 +679,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
if (port == last_port) {
__list_del_many(&p->requests,
- &rq->priotree.link);
+ &rq->sched.link);
goto done;
}
@@ -671,7 +693,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (ctx_single_port_submission(last->ctx) ||
ctx_single_port_submission(rq->ctx)) {
__list_del_many(&p->requests,
- &rq->priotree.link);
+ &rq->sched.link);
goto done;
}
@@ -684,7 +706,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- INIT_LIST_HEAD(&rq->priotree.link);
+ INIT_LIST_HEAD(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
@@ -697,8 +719,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
+
done:
- execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose queue_priority such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We therefore derive the queue_priority as the priority of the
+ * first "hole" in the HW submission ports; if there are no available
+ * slots, it is the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the queue_priority is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ execlists->queue_priority =
+ port != execlists->port ? rq_prio(last) : INT_MIN;
+
execlists->first = rb;
if (submit)
port_assign(port, last);
@@ -706,13 +747,25 @@ done:
/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
-unlock:
- spin_unlock_irq(&engine->timeline->lock);
+ /* Re-evaluate the executing context setup after each preemptive kick */
+ if (last)
+ execlists_user_begin(execlists, execlists->port);
+
+ return submit;
+}
- if (submit) {
- execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ unsigned long flags;
+ bool submit;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ submit = __execlists_dequeue(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ if (submit)
execlists_submit_ports(engine);
- }
GEM_BUG_ON(port_isset(execlists->port) &&
!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
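
The dequeue is now split in two: __execlists_dequeue() makes the submit decision entirely under the engine timeline lock, and the ELSP write happens only after the lock is dropped. A userspace model of that decide-under-lock / act-outside pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

    /* must be called with timeline_lock held; returns the submit decision */
    static bool __dequeue(void)
    {
        /* ...pick requests and fill the ports here... */
        return true;
    }

    static void submit_ports(void)
    {
        puts("write ELSP"); /* hardware access, done without the lock */
    }

    static void dequeue(void)
    {
        bool submit;

        pthread_mutex_lock(&timeline_lock);
        submit = __dequeue();
        pthread_mutex_unlock(&timeline_lock);

        if (submit)
            submit_ports();
    }

    int main(void) { dequeue(); return 0; }
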
@@ -727,13 +780,18 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_BUG_ON(!execlists->active);
- intel_engine_context_out(rq->engine);
+ GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+ rq->engine->name,
+ (unsigned int)(port - execlists->port),
+ rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
+ intel_engine_get_seqno(rq->engine));
- execlists_context_status_change(rq,
- i915_request_completed(rq) ?
- INTEL_CONTEXT_SCHEDULE_OUT :
- INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+ GEM_BUG_ON(!execlists->active);
+ execlists_context_schedule_out(rq,
+ i915_request_completed(rq) ?
+ INTEL_CONTEXT_SCHEDULE_OUT :
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_request_put(rq);
@@ -741,7 +799,82 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++;
}
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+ execlists_user_end(execlists);
+}
+
+static void clear_gtiir(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int i;
+
+ /*
+ * Clear any pending interrupt state.
+ *
+ * We do it twice out of paranoia that some of the IIR are
+ * double buffered, and so if we only reset it once there may
+ * still be an interrupt pending.
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ static const struct {
+ u8 bank;
+ u8 bit;
+ } gen11_gtiir[] = {
+ [RCS] = {0, GEN11_RCS0},
+ [BCS] = {0, GEN11_BCS},
+ [_VCS(0)] = {1, GEN11_VCS(0)},
+ [_VCS(1)] = {1, GEN11_VCS(1)},
+ [_VCS(2)] = {1, GEN11_VCS(2)},
+ [_VCS(3)] = {1, GEN11_VCS(3)},
+ [_VECS(0)] = {1, GEN11_VECS(0)},
+ [_VECS(1)] = {1, GEN11_VECS(1)},
+ };
+ unsigned long irqflags;
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for (i = 0; i < 2; i++) {
+ gen11_reset_one_iir(dev_priv,
+ gen11_gtiir[engine->id].bank,
+ gen11_gtiir[engine->id].bit);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ } else {
+ static const u8 gtiir[] = {
+ [RCS] = 0,
+ [BCS] = 0,
+ [VCS] = 1,
+ [VCS2] = 1,
+ [VECS] = 3,
+ };
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ engine->irq_keep_mask);
+ POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
+ }
+ GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
+ engine->irq_keep_mask);
+ }
+}
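
The two-pass clear guards against the IIR being double buffered: if it were, the first write could merely expose a second latched value. A self-contained sketch of the pattern, with write_iir()/read_iir() as stand-ins for the MMIO accessors:

    #include <assert.h>

    static unsigned int iir = 0x3u;             /* models one IIR bank */

    static void write_iir(unsigned int mask) { iir &= ~mask; }
    static unsigned int read_iir(void) { return iir; }

    static void clear_pending(unsigned int keep_mask)
    {
        /* two passes, in case the first only uncovered a latched value */
        for (int i = 0; i < 2; i++)
            write_iir(keep_mask);

        assert(!(read_iir() & keep_mask));      /* mirrors the GEM_BUG_ON */
    }

    int main(void) { clear_pending(0x3u); return 0; }
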
+
+static void reset_irq(struct intel_engine_cs *engine)
+{
+ /* Mark all CS interrupts as complete */
+ smp_store_mb(engine->execlists.active, 0);
+ synchronize_hardirq(engine->i915->drm.irq);
+
+ clear_gtiir(engine);
+
+ /*
+ * The port is checked prior to scheduling a tasklet, but
+ * just in case we have suspended the tasklet to do the
+ * wedging, make sure that when it wakes, it decides there
+ * is no work to do by clearing the irq_posted bit.
+ */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -751,7 +884,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ GEM_TRACE("%s current %d\n",
+ engine->name, intel_engine_get_seqno(engine));
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -771,11 +905,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
+ reset_irq(engine);
- spin_lock(&engine->timeline->lock);
+ spin_lock(&engine->timeline.lock);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline->requests, link) {
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
if (!i915_request_completed(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -786,8 +921,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
while (rb) {
struct i915_priolist *p = to_priolist(rb);
- list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
- INIT_LIST_HEAD(&rq->priotree.link);
+ list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ INIT_LIST_HEAD(&rq->sched.link);
dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
@@ -807,18 +942,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
execlists->first = NULL;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock(&engine->timeline->lock);
-
- /*
- * The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
- */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-
- /* Mark all CS interrupts as complete */
- execlists->active = 0;
+ spin_unlock(&engine->timeline.lock);
local_irq_restore(flags);
}
@@ -831,7 +955,7 @@ static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port * const port = execlists->port;
+ struct execlist_port *port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
bool fw = false;
@@ -959,10 +1083,13 @@ static void execlists_submission_tasklet(unsigned long data)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
engine->name,
port->context_id, count,
rq ? rq->global_seqno : 0,
+ rq ? rq->fence.context : 0,
+ rq ? rq->fence.seqno : 0,
+ intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
/* Check the context/desc id for this event matches */
@@ -970,28 +1097,43 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(count == 0);
if (--count == 0) {
+ /*
+ * On the final event corresponding to the
+ * submission of this context, we expect either
+ * an element-switch event or a completion
+ * event (and on completion, the active-idle
+ * marker). No more preemptions, lite-restore
+ * or otherwise.
+ */
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, so that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt and CSB are processed.
+ */
GEM_BUG_ON(!i915_request_completed(rq));
- execlists_context_schedule_out(rq);
- trace_i915_request_out(rq);
+
+ execlists_context_schedule_out(rq,
+ INTEL_CONTEXT_SCHEDULE_OUT);
i915_request_put(rq);
GEM_TRACE("%s completed ctx=%d\n",
engine->name, port->context_id);
- execlists_port_complete(execlists, port);
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port))
+ execlists_user_begin(execlists, port);
+ else
+ execlists_user_end(execlists);
} else {
port_set(port, port_pack(rq, count));
}
-
- /* After the final element, the hw should be idle */
- GEM_BUG_ON(port_count(port) == 0 &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- if (port_count(port) == 0)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_USER);
}
if (head != execlists->csb_head) {
@@ -1014,18 +1156,23 @@ static void execlists_submission_tasklet(unsigned long data)
}
static void queue_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
+ struct i915_sched_node *node,
int prio)
{
- list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+ list_add_tail(&node->link,
+ &lookup_priolist(engine, prio)->requests);
+}
+
+static void __submit_queue(struct intel_engine_cs *engine, int prio)
+{
+ engine->execlists.queue_priority = prio;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
- if (prio > engine->execlists.queue_priority) {
- engine->execlists.queue_priority = prio;
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
+ if (prio > engine->execlists.queue_priority)
+ __submit_queue(engine, prio);
}
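
Splitting out __submit_queue() keeps the policy in one place: the tasklet is kicked only when the incoming priority strictly beats the remembered queue_priority, avoiding redundant reschedules. A sketch of the gate, with schedule_tasklet() as a hypothetical stand-in for tasklet_hi_schedule():

    #include <limits.h>
    #include <stdio.h>

    static int queue_priority = INT_MIN; /* INT_MIN: nothing worth running */

    static void schedule_tasklet(void) { puts("kick"); }

    static void __submit_queue(int prio)
    {
        queue_priority = prio;           /* remember the strongest waiter */
        schedule_tasklet();              /* re-run the dequeue decision */
    }

    static void submit_queue(int prio)
    {
        if (prio > queue_priority)       /* only kick on a strict raise */
            __submit_queue(prio);
    }

    int main(void)
    {
        submit_queue(0);                 /* kicks */
        submit_queue(0);                 /* no-op: not strictly higher */
        return 0;
    }
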
static void execlists_submit_request(struct i915_request *request)
@@ -1034,42 +1181,45 @@ static void execlists_submit_request(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
- queue_request(engine, &request->priotree, rq_prio(request));
+ queue_request(engine, &request->sched, rq_prio(request));
submit_queue(engine, rq_prio(request));
GEM_BUG_ON(!engine->execlists.first);
- GEM_BUG_ON(list_empty(&request->priotree.link));
+ GEM_BUG_ON(list_empty(&request->sched.link));
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static struct i915_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *sched_to_request(struct i915_sched_node *node)
{
- return container_of(pt, struct i915_request, priotree);
+ return container_of(node, struct i915_request, sched);
}
static struct intel_engine_cs *
-pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine = pt_to_request(pt)->engine;
+ struct intel_engine_cs *engine = sched_to_request(node)->engine;
GEM_BUG_ON(!locked);
if (engine != locked) {
- spin_unlock(&locked->timeline->lock);
- spin_lock(&engine->timeline->lock);
+ spin_unlock(&locked->timeline.lock);
+ spin_lock(&engine->timeline.lock);
}
return engine;
}
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+ const struct i915_sched_attr *attr)
{
- struct intel_engine_cs *engine;
+ struct i915_priolist *uninitialized_var(pl);
+ struct intel_engine_cs *engine, *last;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
+ const int prio = attr->priority;
LIST_HEAD(dfs);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1077,23 +1227,23 @@ static void execlists_schedule(struct i915_request *request, int prio)
if (i915_request_completed(request))
return;
- if (prio <= READ_ONCE(request->priotree.priority))
+ if (prio <= READ_ONCE(request->sched.attr.priority))
return;
/* Need BKL in order to use the temporary link inside i915_dependency */
lockdep_assert_held(&request->i915->drm.struct_mutex);
- stack.signaler = &request->priotree;
+ stack.signaler = &request->sched;
list_add(&stack.dfs_link, &dfs);
/*
* Recursively bump all dependent priorities to match the new request.
*
* A naive approach would be to use recursion:
- * static void update_priorities(struct i915_priotree *pt, prio) {
- * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * static void update_priorities(struct i915_sched_node *node, prio) {
+ * list_for_each_entry(dep, &node->signalers_list, signal_link)
* update_priorities(dep->signal, prio)
- * queue_request(pt);
+ * queue_request(node);
* }
* but that may have unlimited recursion depth and so runs a very
 * real risk of overrunning the kernel stack. Instead, we build
@@ -1105,7 +1255,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
* last element in the list is the request we must execute first.
*/
list_for_each_entry(dep, &dfs, dfs_link) {
- struct i915_priotree *pt = dep->signaler;
+ struct i915_sched_node *node = dep->signaler;
/*
* Within an engine, there can be no cycle, but we may
@@ -1113,14 +1263,14 @@ static void execlists_schedule(struct i915_request *request, int prio)
* (redundant dependencies are not eliminated) and across
* engines.
*/
- list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ list_for_each_entry(p, &node->signalers_list, signal_link) {
GEM_BUG_ON(p == dep); /* no cycles! */
- if (i915_priotree_signaled(p->signaler))
+ if (i915_sched_node_signaled(p->signaler))
continue;
- GEM_BUG_ON(p->signaler->priority < pt->priority);
- if (prio > READ_ONCE(p->signaler->priority))
+ GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+ if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
}
@@ -1131,37 +1281,45 @@ static void execlists_schedule(struct i915_request *request, int prio)
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
*/
- if (request->priotree.priority == I915_PRIORITY_INVALID) {
- GEM_BUG_ON(!list_empty(&request->priotree.link));
- request->priotree.priority = prio;
+ if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
+ GEM_BUG_ON(!list_empty(&request->sched.link));
+ request->sched.attr = *attr;
if (stack.dfs_link.next == stack.dfs_link.prev)
return;
__list_del_entry(&stack.dfs_link);
}
+ last = NULL;
engine = request->engine;
- spin_lock_irq(&engine->timeline->lock);
+ spin_lock_irq(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
- struct i915_priotree *pt = dep->signaler;
+ struct i915_sched_node *node = dep->signaler;
INIT_LIST_HEAD(&dep->dfs_link);
- engine = pt_lock_engine(pt, engine);
+ engine = sched_lock_engine(node, engine);
- if (prio <= pt->priority)
+ if (prio <= node->attr.priority)
continue;
- pt->priority = prio;
- if (!list_empty(&pt->link)) {
- __list_del_entry(&pt->link);
- queue_request(engine, pt, prio);
+ node->attr.priority = prio;
+ if (!list_empty(&node->link)) {
+ if (last != engine) {
+ pl = lookup_priolist(engine, prio);
+ last = engine;
+ }
+ GEM_BUG_ON(pl->priority != prio);
+ list_move_tail(&node->link, &pl->requests);
}
- submit_queue(engine, prio);
+
+ if (prio > engine->execlists.queue_priority &&
+ i915_sw_fence_done(&sched_to_request(node)->submit))
+ __submit_queue(engine, prio);
}
- spin_unlock_irq(&engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline.lock);
}
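
The comment above explains why priorities are propagated iteratively: recursing through signalers could overrun the kernel stack, so the code builds a flat list of dependencies and walks it. A toy model of the same idea, with an invented node layout and a bounded explicit stack in place of the kernel's dfs list:

    #include <stdio.h>

    struct node {
        int prio;
        struct node *signalers[4];  /* NULL-terminated dependency edges */
    };

    static void bump_priority(struct node *req, int prio)
    {
        struct node *stack[64];     /* explicit stack replaces recursion */
        int top = 0;

        stack[top++] = req;
        while (top) {
            struct node *n = stack[--top];

            if (n->prio >= prio)
                continue;           /* already high enough: prune */
            n->prio = prio;

            for (int i = 0; i < 4 && n->signalers[i]; i++)
                stack[top++] = n->signalers[i];
        }
    }

    int main(void)
    {
        struct node dep = { .prio = 0 };
        struct node req = { .prio = 0, .signalers = { &dep } };

        bump_priority(&req, 5);
        printf("%d %d\n", req.prio, dep.prio); /* 5 5 */
        return 0;
    }
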
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
@@ -1191,7 +1349,7 @@ static struct intel_ring *
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
void *vaddr;
int ret;
@@ -1225,6 +1383,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
+ ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
@@ -1243,7 +1402,7 @@ err:
static void execlists_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(ce->pin_count == 0);
@@ -1262,8 +1421,8 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
static int execlists_request_alloc(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
- struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct intel_context *ce =
+ to_intel_context(request->ctx, request->engine);
int ret;
GEM_BUG_ON(!ce->pin_count);
@@ -1386,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(3);
+
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = MI_LOAD_REGISTER_IMM(1);
*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
*batch++ = _MASKED_BIT_DISABLE(
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+ /* BSpec: 11391 */
+ *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+ *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+ /* BSpec: 11299 */
+ *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+ *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
*batch++ = MI_NOOP;
/* WaClearSlmSpaceAtContextSwitch:kbl */
@@ -1523,6 +1692,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
+ case 11:
+ return 0;
case 10:
wa_bb_fn[0] = gen10_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -1575,14 +1746,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
-static u8 gtiir[] = {
- [RCS] = 0,
- [BCS] = 0,
- [VCS] = 1,
- [VCS2] = 1,
- [VECS] = 3,
-};
-
static void enable_execlists(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1642,6 +1805,8 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
+ intel_whitelist_workarounds_apply(engine);
+
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
@@ -1652,7 +1817,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return init_workarounds_ring(engine);
+ return 0;
}
static int gen9_init_render_ring(struct intel_engine_cs *engine)
@@ -1663,49 +1828,25 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- return init_workarounds_ring(engine);
-}
-
-static void reset_irq(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int i;
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+ intel_whitelist_workarounds_apply(engine);
- /*
- * Clear any pending interrupt state.
- *
- * We do it twice out of paranoia that some of the IIR are double
- * buffered, and if we only reset it once there may still be
- * an interrupt pending.
- */
- for (i = 0; i < 2; i++) {
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
- POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
- }
- GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
- (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift));
-
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ return 0;
}
static void reset_common_ring(struct intel_engine_cs *engine,
struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct intel_context *ce;
unsigned long flags;
+ u32 *regs;
- GEM_TRACE("%s seqno=%x\n",
- engine->name, request ? request->global_seqno : 0);
+ GEM_TRACE("%s request global=%x, current=%d\n",
+ engine->name, request ? request->global_seqno : 0,
+ intel_engine_get_seqno(engine));
/* See execlists_cancel_requests() for the irq/spinlock split. */
local_irq_save(flags);
- reset_irq(engine);
-
/*
* Catch up with any missed context-switch interrupts.
*
@@ -1716,14 +1857,12 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* requests were completed.
*/
execlists_cancel_port_requests(execlists);
+ reset_irq(engine);
/* Push back any incomplete requests for replay after the reset. */
- spin_lock(&engine->timeline->lock);
+ spin_lock(&engine->timeline.lock);
__unwind_incomplete_requests(engine);
- spin_unlock(&engine->timeline->lock);
-
- /* Mark all CS interrupts as complete */
- execlists->active = 0;
+ spin_unlock(&engine->timeline.lock);
local_irq_restore(flags);
@@ -1749,14 +1888,24 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- ce = &request->ctx->engine[engine->id];
- execlists_init_reg_state(ce->lrc_reg_state,
- request->ctx, engine, ce->ring);
+ regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
+ if (engine->default_state) {
+ void *defaults;
+
+ defaults = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR(defaults)) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ defaults + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+ }
+ execlists_init_reg_state(regs, request->ctx, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
- ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
- i915_ggtt_offset(ce->ring->vma);
- ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+ regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+ regs[CTX_RING_HEAD + 1] = request->postfix;
request->ring->head = request->postfix;
intel_ring_update_space(request->ring);
@@ -1817,7 +1966,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
- cs = intel_ring_begin(rq, 4);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1846,6 +1995,9 @@ static int gen8_emit_bb_start(struct i915_request *rq,
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
@@ -1988,7 +2140,7 @@ static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
cs = gen8_emit_ggtt_write(cs, request->global_seqno,
intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2004,7 +2156,7 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2016,7 +2168,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(rq);
+ ret = intel_ctx_workarounds_emit(rq);
if (ret)
return ret;
@@ -2076,11 +2228,13 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (engine->i915->preempt_context)
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->i915->caps.scheduler =
I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY;
- if (engine->i915->preempt_context)
+ if (intel_engine_has_preemption(engine))
engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
@@ -2119,7 +2273,20 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine)
{
- unsigned shift = engine->irq_shift;
+ unsigned int shift = 0;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS] = GEN8_RCS_IRQ_SHIFT,
+ [BCS] = GEN8_BCS_IRQ_SHIFT,
+ [VCS] = GEN8_VCS1_IRQ_SHIFT,
+ [VCS2] = GEN8_VCS2_IRQ_SHIFT,
+ [VECS] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
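
On pre-GEN11 parts every engine's user and context-switch interrupt bits live in a shared register at a per-engine shift; GEN11 moves to banked selectors, so the shift collapses to zero. A sketch of the mask derivation; the shift values here are illustrative, not the real GEN8_*_IRQ_SHIFT constants:

    #include <stdio.h>

    #define USER_INTERRUPT       (1u << 0)
    #define CTX_SWITCH_INTERRUPT (1u << 8)

    enum engine_id { E_RCS, E_BCS, E_VCS, E_VCS2, E_VECS };

    static void default_irqs(enum engine_id id, int gen,
                             unsigned int *enable, unsigned int *keep)
    {
        static const unsigned char shifts[] = {
            [E_RCS] = 0, [E_BCS] = 16,
            [E_VCS] = 0, [E_VCS2] = 16, [E_VECS] = 0,
        };
        unsigned int shift = gen < 11 ? shifts[id] : 0;

        *enable = USER_INTERRUPT << shift;
        *keep   = CTX_SWITCH_INTERRUPT << shift;
    }

    int main(void)
    {
        unsigned int en, keep;

        default_irqs(E_BCS, 9, &en, &keep);
        printf("%#x %#x\n", en, keep);  /* 0x10000 0x1000000 */
        return 0;
    }
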
@@ -2175,9 +2342,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
}
engine->execlists.preempt_complete_status = ~0u;
- if (engine->i915->preempt_context)
+ if (engine->i915->preempt_context) {
+ struct intel_context *ce =
+ to_intel_context(engine->i915->preempt_context, engine);
+
engine->execlists.preempt_complete_status =
- upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+ upper_32_bits(ce->lrc_desc);
+ }
return 0;
@@ -2431,8 +2602,10 @@ populate_lr_context(struct i915_gem_context *ctx,
defaults = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
- if (IS_ERR(defaults))
- return PTR_ERR(defaults);
+ if (IS_ERR(defaults)) {
+ ret = PTR_ERR(defaults);
+ goto err_unpin_ctx;
+ }
memcpy(vaddr + start, defaults + start, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
@@ -2450,19 +2623,20 @@ populate_lr_context(struct i915_gem_context *ctx,
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+err_unpin_ctx:
i915_gem_object_unpin_map(ctx_obj);
-
- return 0;
+ return ret;
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
struct i915_vma *vma;
uint32_t context_size;
struct intel_ring *ring;
+ struct i915_timeline *timeline;
int ret;
if (ce->state)
@@ -2477,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
- if (IS_ERR(ctx_obj)) {
- DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
+ if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- }
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
@@ -2488,7 +2660,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
goto error_deref_obj;
}
- ring = intel_engine_create_ring(engine, ctx->ring_size);
+ timeline = i915_timeline_create(ctx->i915, ctx->name);
+ if (IS_ERR(timeline)) {
+ ret = PTR_ERR(timeline);
+ goto error_deref_obj;
+ }
+
+ ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+ i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
@@ -2530,7 +2709,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
*/
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce =
+ to_intel_context(ctx, engine);
u32 *reg;
if (!ce->state)
@@ -2552,3 +2732,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
}
}
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 59d7b86012e9..4ec7d8dd13c8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -108,7 +108,7 @@ static inline uint64_t
intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- return ctx->engine[engine->id].lrc_desc;
+ return to_intel_context(ctx, engine)->lrc_desc;
}
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e125d16a1aa7..48f618dc9abb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
@@ -1175,8 +1180,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
out:
mutex_unlock(&dev->mode_config.mutex);
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL,
- downclock_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index c0b34b7943b9..9f0bd6a4cb79 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -178,7 +178,8 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv) ||
+ IS_ICELAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
@@ -217,6 +218,8 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
return GEN9_VEBOX_MOCS(index);
case VCS2:
return GEN9_MFX1_MOCS(index);
+ case VCS3:
+ return GEN11_MFX2_MOCS(index);
default:
MISSING_CASE(engine_id);
return INVALID_MMIO_REG;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 36671a937fa4..c2f10d899329 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -807,6 +807,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = PTR_ERR(vma);
goto out_pin_section;
}
+ intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
ret = i915_vma_put_fence(vma);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41d00b1603e3..b443278e569c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1928,13 +1928,11 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
- struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode)
{
intel_panel_init_backlight_funcs(panel);
panel->fixed_mode = fixed_mode;
- panel->alt_fixed_mode = alt_fixed_mode;
panel->downclock_mode = downclock_mode;
return 0;
@@ -1948,10 +1946,6 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
- if (panel->alt_fixed_mode)
- drm_mode_destroy(intel_connector->base.dev,
- panel->alt_fixed_mode);
-
if (panel->downclock_mode)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 1f5cd572a7ff..39a4e4edda07 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -569,7 +569,8 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val)
+ uint32_t *val,
+ bool set_wa)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PF;
@@ -582,7 +583,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
- if ((IS_HASWELL(dev_priv) ||
+ if (set_wa && (IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, true);
@@ -600,7 +601,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
- enum intel_pipe_crc_source *source, u32 *val)
+ enum intel_pipe_crc_source *source, u32 *val,
+ bool set_wa)
{
if (IS_GEN2(dev_priv))
return i8xx_pipe_crc_ctl_reg(source, val);
@@ -611,7 +613,7 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
return ilk_pipe_crc_ctl_reg(source, val);
else
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
}
static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
@@ -636,7 +638,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
return -EIO;
}
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
if (ret != 0)
goto out;
@@ -764,13 +766,12 @@ display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
int i;
- for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
- if (!strcmp(buf, pipe_crc_objects[i])) {
- *o = i;
- return 0;
- }
+ i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
+ if (i < 0)
+ return i;
- return -EINVAL;
+ *o = i;
+ return 0;
}
static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
@@ -796,13 +797,12 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
- if (!strcmp(buf, pipe_crc_sources[i])) {
- *s = i;
- return 0;
- }
+ i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf);
+ if (i < 0)
+ return i;
- return -EINVAL;
+ *s = i;
+ return 0;
}
static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
@@ -916,7 +916,7 @@ int intel_pipe_crc_create(struct drm_minor *minor)
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -934,10 +934,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
return -EIO;
}
- ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
if (ret != 0)
goto out;
+ pipe_crc->source = source;
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
@@ -959,3 +960,39 @@ out:
return ret;
}
+
+void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ u32 val = 0;
+
+ if (!crtc->crc.opened)
+ return;
+
+ if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+ return;
+
+ /* Don't need pipe_crc->lock here, IRQs are not generated. */
+ pipe_crc->skipped = 0;
+
+ I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+ POSTING_READ(PIPE_CRC_CTL(crtc->index));
+}
+
+void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+
+ /* Swallow CRCs until we stop generating them. */
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->skipped = INT_MIN;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
+ POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ synchronize_irq(dev_priv->drm.irq);
+}
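
The disable path marks the skip counter INT_MIN under the lock before turning off the source, so any CRC interrupt still in flight drops its result rather than waking a reader; the final synchronize_irq() then drains stragglers. A userspace model of that hand-off, with a pthread mutex in place of the spinlock:

    #include <limits.h>
    #include <pthread.h>

    static pthread_mutex_t crc_lock = PTHREAD_MUTEX_INITIALIZER;
    static int skipped;

    static void crc_irq(unsigned int crc)  /* models the interrupt path */
    {
        pthread_mutex_lock(&crc_lock);
        if (skipped >= 0) {
            /* ...hand the crc to the waiting reader... */
        }
        pthread_mutex_unlock(&crc_lock);
        (void)crc;
    }

    static void crc_disable(void)
    {
        pthread_mutex_lock(&crc_lock);
        skipped = INT_MIN;     /* swallow everything from now on */
        pthread_mutex_unlock(&crc_lock);

        /* then write 0 to the CTL register and synchronize with the IRQ */
    }

    int main(void)
    {
        crc_disable();
        crc_irq(0xdeadbeefu);  /* dropped: tear-down already marked */
        return 0;
    }
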
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b8da4dcdd584..53aaaa3e6886 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3567,6 +3567,23 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
+static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+{
+ u8 enabled_slices;
+
+ /* Slice 1 will always be enabled */
+ enabled_slices = 1;
+
+ /* Gens prior to GEN11 have only one DBuf slice */
+ if (INTEL_GEN(dev_priv) < 11)
+ return enabled_slices;
+
+ if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+ enabled_slices++;
+
+ return enabled_slices;
+}
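
Slice 1 is assumed always powered; on GEN11 a second slice is counted when its power-state bit reads back set. A sketch of the readback; read_dbuf_s2() and the bit position are assumptions in place of the DBUF_CTL_S2 MMIO read:

    #include <stdio.h>

    #define DBUF_POWER_STATE (1u << 30)  /* assumed bit position */

    static unsigned int read_dbuf_s2(void) { return DBUF_POWER_STATE; }

    static unsigned int enabled_dbuf_slices(int gen)
    {
        unsigned int slices = 1;         /* slice 1 is always powered */

        if (gen < 11)
            return slices;               /* single-slice hardware */

        if (read_dbuf_s2() & DBUF_POWER_STATE)
            slices++;                    /* slice 2 powered up */

        return slices;
    }

    int main(void)
    {
        printf("%u\n", enabled_dbuf_slices(11)); /* 2 */
        return 0;
    }
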
+
/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
@@ -3754,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
return true;
}
+static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *cstate,
+ const unsigned int total_data_rate,
+ const int num_active,
+ struct skl_ddb_allocation *ddb)
+{
+ const struct drm_display_mode *adjusted_mode;
+ u64 total_data_bw;
+ u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+ WARN_ON(ddb_size == 0);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+ adjusted_mode = &cstate->base.adjusted_mode;
+ total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+
+ /*
+ * 12GB/s is the maximum BW supported by a single DBuf slice.
+ */
+ if (total_data_bw >= GBps(12) || num_active > 1) {
+ ddb->enabled_slices = 2;
+ } else {
+ ddb->enabled_slices = 1;
+ ddb_size /= 2;
+ }
+
+ return ddb_size;
+}
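
The decision above reduces to one comparison: total bandwidth is the frame data rate times the refresh rate, and anything at or above 12 GB/s (or more than one active pipe) needs both slices, while a single light pipe hands half the DDB back. A worked sketch; GBps() here is a local helper rather than the kernel macro, and the numbers are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define GBps(x) ((uint64_t)(x) * 1000000000ull)

    static unsigned int ddb_size_for(uint64_t data_rate, int vrefresh,
                                     int num_active, unsigned int ddb_size,
                                     unsigned int *slices)
    {
        uint64_t bw = data_rate * vrefresh;

        if (bw >= GBps(12) || num_active > 1) {
            *slices = 2;              /* need both slices */
        } else {
            *slices = 1;
            ddb_size /= 2;            /* only half the DDB is usable */
        }
        return ddb_size;
    }

    int main(void)
    {
        unsigned int slices;
        /* ~33 MB/frame at 60 Hz is ~2 GB/s: one slice suffices */
        unsigned int sz = ddb_size_for(33000000ull, 60, 1, 2048, &slices);

        printf("%u blocks, %u slice(s)\n", sz, slices); /* 1024, 1 */
        return 0;
    }
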
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
+ const unsigned int total_data_rate,
+ struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
@@ -3779,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
else
*num_active = hweight32(dev_priv->active_crtcs);
- ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- WARN_ON(ddb_size == 0);
-
- if (INTEL_GEN(dev_priv) < 11)
- ddb_size -= 4; /* 4 blocks for bypass path allocation */
+ ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+ *num_active, ddb);
/*
* If the state doesn't change the active CRTC's, then there's
@@ -3817,14 +3864,64 @@ static unsigned int skl_cursor_allocation(int num_active)
return 8;
}
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
+ struct skl_ddb_entry *entry, u32 reg)
{
- entry->start = reg & 0x3ff;
- entry->end = (reg >> 16) & 0x3ff;
+ u16 mask;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask = ICL_DDB_ENTRY_MASK;
+ else
+ mask = SKL_DDB_ENTRY_MASK;
+ entry->start = reg & mask;
+ entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+
if (entry->end)
entry->end += 1;
}
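
Both fields of a DDB entry are unpacked from one register; the patch widens the field mask on ICL and keeps the end exclusive by adding one to a non-zero value. A decode sketch: 0x3ff matches the removed SKL code, while the ICL width used here is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    struct ddb_entry { uint16_t start, end; };

    static struct ddb_entry decode_ddb(uint32_t reg, int gen)
    {
        uint16_t mask = gen >= 11 ? 0x7ff : 0x3ff; /* assumed ICL width */
        struct ddb_entry e;

        e.start = reg & mask;
        e.end = (reg >> 16) & mask;
        if (e.end)
            e.end += 1;    /* hardware stores an inclusive end */
        return e;
    }

    int main(void)
    {
        struct ddb_entry e = decode_ddb((200u << 16) | 100u, 9);

        printf("%u..%u\n", e.start, e.end); /* 100..201 (end exclusive) */
        return 0;
    }
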
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
+ const enum pipe pipe,
+ const enum plane_id plane_id,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ u32 val, val2 = 0;
+ int fourcc, pixel_format;
+
+ /* Cursor doesn't support NV12/planar, so no extra calculation needed */
+ if (plane_id == PLANE_CURSOR) {
+ val = I915_READ(CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(dev_priv,
+ &ddb->plane[pipe][plane_id], val);
+ return;
+ }
+
+ val = I915_READ(PLANE_CTL(pipe, plane_id));
+
+ /* No DDB allocated for disabled planes */
+ if (!(val & PLANE_CTL_ENABLE))
+ return;
+
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
+
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+
+ if (fourcc == DRM_FORMAT_NV12) {
+ skl_ddb_entry_init_from_hw(dev_priv,
+ &ddb->plane[pipe][plane_id], val2);
+ skl_ddb_entry_init_from_hw(dev_priv,
+ &ddb->uv_plane[pipe][plane_id], val);
+ } else {
+ skl_ddb_entry_init_from_hw(dev_priv,
+ &ddb->plane[pipe][plane_id], val);
+ }
+}
+
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
@@ -3832,6 +3929,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
memset(ddb, 0, sizeof(*ddb));
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum intel_display_power_domain power_domain;
enum plane_id plane_id;
@@ -3841,16 +3940,9 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- u32 val;
-
- if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- else
- val = I915_READ(CUR_BUF_CFG(pipe));
-
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
- }
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id, ddb);
intel_display_power_put(dev_priv, power_domain);
}
@@ -4009,9 +4101,9 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
- int y)
+ const int plane)
{
- struct intel_plane *plane = to_intel_plane(pstate->plane);
+ struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
uint32_t data_rate;
uint32_t width = 0, height = 0;
@@ -4025,9 +4117,9 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
fb = pstate->fb;
format = fb->format->format;
- if (plane->id == PLANE_CURSOR)
+ if (intel_plane->id == PLANE_CURSOR)
return 0;
- if (y && format != DRM_FORMAT_NV12)
+ if (plane == 1 && format != DRM_FORMAT_NV12)
return 0;
/*
@@ -4038,19 +4130,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- /* for planar format */
- if (format == DRM_FORMAT_NV12) {
- if (y) /* y-plane data rate */
- data_rate = width * height *
- fb->format->cpp[0];
- else /* uv-plane data rate */
- data_rate = (width / 2) * (height / 2) *
- fb->format->cpp[1];
- } else {
- /* for packed formats */
- data_rate = width * height * fb->format->cpp[0];
+ /* UV plane does 1/2 pixel sub-sampling */
+ if (plane == 1 && format == DRM_FORMAT_NV12) {
+ width /= 2;
+ height /= 2;
}
+ data_rate = width * height * fb->format->cpp[plane];
+
down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
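
With the y/uv split expressed as a plane index, the UV plane of an NV12 framebuffer covers a quarter of the pixels at cpp[1] bytes each, and packed formats simply report zero for plane 1. A worked sketch of the per-plane rate (downscaling omitted):

    #include <stdio.h>

    static unsigned int data_rate(unsigned int w, unsigned int h,
                                  int plane, int is_nv12,
                                  const unsigned int *cpp)
    {
        if (plane == 1 && !is_nv12)
            return 0;           /* packed formats have no UV plane */

        if (plane == 1) {       /* 2x2 chroma sub-sampling */
            w /= 2;
            h /= 2;
        }
        return w * h * cpp[plane];
    }

    int main(void)
    {
        unsigned int cpp[] = { 1, 2 }; /* NV12: 1 byte Y, 2 bytes CbCr */

        /* 1920x1080 NV12: Y = 2073600 B, UV = 960*540*2 = 1036800 B */
        printf("%u %u\n", data_rate(1920, 1080, 0, 1, cpp),
                          data_rate(1920, 1080, 1, 1, cpp));
        return 0;
    }
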
@@ -4063,8 +4150,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
*/
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
- unsigned *plane_data_rate,
- unsigned *plane_y_data_rate)
+ unsigned int *plane_data_rate,
+ unsigned int *uv_plane_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
@@ -4080,17 +4167,17 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
enum plane_id plane_id = to_intel_plane(plane)->id;
unsigned int rate;
- /* packed/uv */
+ /* packed/y */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
- /* y-plane */
+ /* uv-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- plane_y_data_rate[plane_id] = rate;
+ uv_plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4099,8 +4186,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate,
- const int y)
+skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
{
struct drm_framebuffer *fb = pstate->fb;
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
@@ -4111,8 +4197,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
if (WARN_ON(!fb))
return 0;
- /* For packed formats, no y-plane, return 0 */
- if (y && fb->format->format != DRM_FORMAT_NV12)
+ /* For packed formats there is no UV plane; return 0 */
+ if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
return 0;
/* For Non Y-tile return 8-blocks */
@@ -4131,15 +4217,12 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
/* Halve UV plane width and height for NV12 */
- if (fb->format->format == DRM_FORMAT_NV12 && !y) {
+ if (plane == 1) {
src_w /= 2;
src_h /= 2;
}
- if (fb->format->format == DRM_FORMAT_NV12 && !y)
- plane_bpp = fb->format->cpp[1];
- else
- plane_bpp = fb->format->cpp[0];
+ plane_bpp = fb->format->cpp[plane];
if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
@@ -4167,7 +4250,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
- uint16_t *minimum, uint16_t *y_minimum)
+ uint16_t *minimum, uint16_t *uv_minimum)
{
const struct drm_plane_state *pstate;
struct drm_plane *plane;
@@ -4182,7 +4265,7 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
continue;
minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4200,17 +4283,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
- uint16_t y_minimum[I915_MAX_PLANES] = {};
+ uint16_t uv_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
enum plane_id plane_id;
int num_active;
- unsigned plane_data_rate[I915_MAX_PLANES] = {};
- unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+ unsigned int plane_data_rate[I915_MAX_PLANES] = {};
+ unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
if (WARN_ON(!state))
return 0;
@@ -4220,12 +4303,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ uv_plane_data_rate);
+ skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
- skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
+ skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
/*
 * 1. Allocate the minimum required blocks for each active plane
@@ -4235,7 +4322,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
total_min_blocks += minimum[plane_id];
- total_min_blocks += y_minimum[plane_id];
+ total_min_blocks += uv_minimum[plane_id];
}
if (total_min_blocks > alloc_size) {
@@ -4255,16 +4342,13 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate,
- plane_data_rate,
- plane_y_data_rate);
if (total_data_rate == 0)
return 0;
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- unsigned int data_rate, y_data_rate;
- uint16_t plane_blocks, y_plane_blocks = 0;
+ unsigned int data_rate, uv_data_rate;
+ uint16_t plane_blocks, uv_plane_blocks;
if (plane_id == PLANE_CURSOR)
continue;
@@ -4288,21 +4372,20 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start += plane_blocks;
- /*
- * allocation for y_plane part of planar format:
- */
- y_data_rate = plane_y_data_rate[plane_id];
+ /* Allocate DDB for UV plane for planar format/NV12 */
+ uv_data_rate = uv_plane_data_rate[plane_id];
- y_plane_blocks = y_minimum[plane_id];
- y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
- total_data_rate);
+ uv_plane_blocks = uv_minimum[plane_id];
+ uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
+ total_data_rate);
- if (y_data_rate) {
- ddb->y_plane[pipe][plane_id].start = start;
- ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
+ if (uv_data_rate) {
+ ddb->uv_plane[pipe][plane_id].start = start;
+ ddb->uv_plane[pipe][plane_id].end =
+ start + uv_plane_blocks;
}
- start += y_plane_blocks;
+ start += uv_plane_blocks;
}
return 0;
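A minimal sketch of the proportional split performed above, with hypothetical numbers: each plane gets its minimum plus a share of alloc_size weighted by its relative data rate, and the UV blocks are carved out of the same running start offset so the [start, end) ranges never overlap.

/* Hypothetical: alloc_size = 512, plane rate = 100, total_data_rate = 400 */
uint16_t blocks = 8 /* minimum */ + div_u64((uint64_t)512 * 100, 400); /* = 136 */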
@@ -4398,7 +4481,7 @@ static int
skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp)
+ struct skl_wm_params *wp, int plane_id)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
const struct drm_plane_state *pstate = &intel_pstate->base;
@@ -4411,6 +4494,12 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
if (!intel_wm_plane_visible(cstate, intel_pstate))
return 0;
+ /* Only NV12 has two colour planes */
+ if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ DRM_DEBUG_KMS("Non-NV12 formats have a single plane\n");
+ return -EINVAL;
+ }
+
wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
@@ -4418,6 +4507,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = fb->format->format == DRM_FORMAT_NV12;
if (plane->id == PLANE_CURSOR) {
wp->width = intel_pstate->base.crtc_w;
@@ -4430,8 +4520,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
- fb->format->cpp[0];
+ if (plane_id == 1 && wp->is_planar)
+ wp->width /= 2;
+
+ wp->cpp = fb->format->cpp[plane_id];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4499,9 +4591,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint16_t ddb_allocation,
int level,
const struct skl_wm_params *wp,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines, /* out */
- bool *enabled /* out */)
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
{
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
@@ -4515,7 +4606,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (latency == 0 ||
!intel_wm_plane_visible(cstate, intel_pstate)) {
- *enabled = false;
+ result->plane_en = false;
return 0;
}
@@ -4568,6 +4659,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
} else {
res_blocks++;
}
+
+ /*
+ * Make sure result blocks for higher latency levels are at least
+ * as high as the level below the current level.
+ * The DDB algorithm optimization for special cases assumes this.
+ * Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->plane_res_b > res_blocks)
+ res_blocks = result_prev->plane_res_b;
}
if (INTEL_GEN(dev_priv) >= 11) {
@@ -4596,7 +4696,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- *enabled = false;
+ result->plane_en = false;
/*
* If there are no valid level 0 watermarks, then we can't
@@ -4615,10 +4715,21 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
+ /*
+ * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
+ * disable wm level 1-7 on NV12 planes
+ */
+ if (wp->is_planar && level >= 1 &&
+ (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
+ result->plane_en = false;
+ return 0;
+ }
+
/* The number of lines are ignored for the level 0 watermark. */
- *out_lines = level ? res_lines : 0;
- *out_blocks = res_blocks;
- *enabled = true;
+ result->plane_res_b = res_blocks;
+ result->plane_res_l = res_lines;
+ result->plane_en = true;
return 0;
}
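A worked illustration of the monotonicity clamp applied above, with made-up per-level results:

/*
 * Hypothetical raw results: wm[0..3].plane_res_b = { 32, 48, 40, 44 }
 * After the clamp:          wm[0..3].plane_res_b = { 32, 48, 48, 48 }
 * so blocks never decrease as latency (the level) grows.
 */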
@@ -4629,7 +4740,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm)
+ struct skl_plane_wm *wm,
+ int plane_id)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_plane *plane = intel_pstate->base.plane;
@@ -4637,15 +4749,26 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int level, max_level = ilk_wm_max_level(dev_priv);
+ enum plane_id intel_plane_id = intel_plane->id;
int ret;
if (WARN_ON(!intel_pstate->base.fb))
return -EINVAL;
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
+ ddb_blocks = plane_id ?
+ skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
+ skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = &wm->wm[level];
+ struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
+ &wm->wm[level];
+ struct skl_wm_level *result_prev;
+
+ if (level)
+ result_prev = plane_id ? &wm->uv_wm[level - 1] :
+ &wm->wm[level - 1];
+ else
+ result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
ret = skl_compute_plane_wm(dev_priv,
cstate,
@@ -4653,13 +4776,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
ddb_blocks,
level,
wm_params,
- &result->plane_res_b,
- &result->plane_res_l,
- &result->plane_en);
+ result_prev,
+ result);
if (ret)
return ret;
}
+ if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
+ wm->is_planar = true;
+
return 0;
}
@@ -4769,20 +4894,39 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
wm = &pipe_wm->planes[plane_id];
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
- memset(&wm_params, 0, sizeof(struct skl_wm_params));
ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate, &wm_params);
+ intel_pstate, &wm_params, 0);
if (ret)
return ret;
ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params, wm);
+ intel_pstate, &wm_params, wm, 0);
if (ret)
return ret;
+
skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
ddb_blocks, &wm->trans_wm);
+
+ /* UV plane watermarks must also be validated for NV12/planar formats */
+ if (wm_params.is_planar) {
+ memset(&wm_params, 0, sizeof(struct skl_wm_params));
+ wm->is_planar = true;
+
+ ret = skl_compute_plane_wm_params(dev_priv, cstate,
+ intel_pstate,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
+ intel_pstate, &wm_params,
+ wm, 1);
+ if (ret)
+ return ret;
+ }
}
+
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
return 0;
@@ -4833,10 +4977,21 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
&ddb->plane[pipe][plane_id]);
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11)
+ return skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id),
+ &ddb->plane[pipe][plane_id]);
+ if (wm->is_planar) {
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+ &ddb->uv_plane[pipe][plane_id]);
skl_ddb_entry_write(dev_priv,
PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->y_plane[pipe][plane_id]);
+ &ddb->plane[pipe][plane_id]);
+ } else {
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+ &ddb->plane[pipe][plane_id]);
+ I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ }
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
@@ -4944,15 +5099,13 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
struct drm_plane *plane;
enum pipe pipe = intel_crtc->pipe;
- WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
-
drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
enum plane_id plane_id = to_intel_plane(plane)->id;
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
&new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
- &new_ddb->y_plane[pipe][plane_id]))
+ skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
+ &new_ddb->uv_plane[pipe][plane_id]))
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
@@ -4966,69 +5119,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct intel_crtc *intel_crtc;
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
- uint32_t realloc_pipes = pipes_modified(state);
- int ret;
-
- /*
- * If this is our first atomic update following hardware readout,
- * we can't trust the DDB that the BIOS programmed for us. Let's
- * pretend that all pipes switched active status so that we'll
- * ensure a full DDB recompute.
- */
- if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- state->acquire_ctx);
- if (ret)
- return ret;
-
- intel_state->active_pipe_changes = ~0;
-
- /*
- * We usually only initialize intel_state->active_crtcs if we
- * we're doing a modeset; make sure this field is always
- * initialized during the sanitization process that happens
- * on the first commit too.
- */
- if (!intel_state->modeset)
- intel_state->active_crtcs = dev_priv->active_crtcs;
- }
-
- /*
- * If the modeset changes which CRTC's are active, we need to
- * recompute the DDB allocation for *all* active pipes, even
- * those that weren't otherwise being modified in any way by this
- * atomic commit. Due to the shrinking of the per-pipe allocations
- * when new active CRTC's are added, it's possible for a pipe that
- * we were already using and aren't changing at all here to suddenly
- * become invalid if its DDB needs exceeds its new allocation.
- *
- * Note that if we wind up doing a full DDB recompute, we can't let
- * any other display updates race with this transaction, so we need
- * to grab the lock on *all* CRTC's.
- */
- if (intel_state->active_pipe_changes) {
- realloc_pipes = ~0;
- intel_state->wm_results.dirty_pipes = ~0;
- }
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ int ret, i;
- /*
- * We're not recomputing for the pipes not included in the commit, so
- * make sure we start with the current state.
- */
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
- struct intel_crtc_state *cstate;
-
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
-
+ for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
ret = skl_allocate_pipe_ddb(cstate, ddb);
if (ret)
return ret;
@@ -5042,12 +5142,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_copy_wm_for_pipe(struct skl_wm_values *dst,
- struct skl_wm_values *src,
- enum pipe pipe)
+skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
+ struct skl_ddb_values *src,
+ enum pipe pipe)
{
- memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
- sizeof(dst->ddb.y_plane[pipe]));
+ memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe],
+ sizeof(dst->ddb.uv_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
}
@@ -5090,23 +5190,23 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
}
static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_wm_values *results = &intel_state->wm_results;
struct drm_device *dev = state->dev;
- struct skl_pipe_wm *pipe_wm;
- bool changed = false;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct drm_crtc *crtc;
+ const struct drm_crtc_state *cstate;
+ struct intel_crtc *intel_crtc;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ uint32_t realloc_pipes = pipes_modified(state);
int ret, i;
/*
* When we distrust bios wm we always need to recompute to set the
* expected DDB allocations for each CRTC.
*/
- if (to_i915(dev)->wm.distrust_bios_wm)
- changed = true;
+ if (dev_priv->wm.distrust_bios_wm)
+ (*changed) = true;
/*
* If this transaction isn't actually touching any CRTC's, don't
@@ -5117,14 +5217,86 @@ skl_compute_wm(struct drm_atomic_state *state)
* hold _all_ CRTC state mutexes.
*/
for_each_new_crtc_in_state(state, crtc, cstate, i)
- changed = true;
+ (*changed) = true;
- if (!changed)
+ if (!*changed)
return 0;
+ /*
+ * If this is our first atomic update following hardware readout,
+ * we can't trust the DDB that the BIOS programmed for us. Let's
+ * pretend that all pipes switched active status so that we'll
+ * ensure a full DDB recompute.
+ */
+ if (dev_priv->wm.distrust_bios_wm) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ intel_state->active_pipe_changes = ~0;
+
+ /*
+ * We usually only initialize intel_state->active_crtcs if we're
+ * doing a modeset; make sure this field is always
+ * initialized during the sanitization process that happens
+ * on the first commit too.
+ */
+ if (!intel_state->modeset)
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+ }
+
+ /*
+ * If the modeset changes which CRTC's are active, we need to
+ * recompute the DDB allocation for *all* active pipes, even
+ * those that weren't otherwise being modified in any way by this
+ * atomic commit. Due to the shrinking of the per-pipe allocations
+ * when new active CRTC's are added, it's possible for a pipe that
+ * we were already using and aren't changing at all here to suddenly
+ * become invalid if its DDB needs exceed its new allocation.
+ *
+ * Note that if we wind up doing a full DDB recompute, we can't let
+ * any other display updates race with this transaction, so we need
+ * to grab the lock on *all* CRTC's.
+ */
+ if (intel_state->active_pipe_changes) {
+ realloc_pipes = ~0;
+ intel_state->wm_results.dirty_pipes = ~0;
+ }
+
+ /*
+ * We're not recomputing for the pipes not included in the commit, so
+ * make sure we start with the current state.
+ */
+ for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+ struct intel_crtc_state *cstate;
+
+ cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
+ }
+
+ return 0;
+}
+
+static int
+skl_compute_wm(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_ddb_values *results = &intel_state->wm_results;
+ struct skl_pipe_wm *pipe_wm;
+ bool changed = false;
+ int ret, i;
+
/* Clear all dirty flags */
results->dirty_pipes = 0;
+ ret = skl_ddb_add_affected_pipes(state, &changed);
+ if (ret || !changed)
+ return ret;
+
ret = skl_compute_ddb(state);
if (ret)
return ret;
@@ -5197,8 +5369,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &state->wm_results;
- struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
+ struct skl_ddb_values *results = &state->wm_results;
+ struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
@@ -5209,7 +5381,7 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- skl_copy_wm_for_pipe(hw_vals, results, pipe);
+ skl_copy_ddb_for_pipe(hw_vals, results, pipe);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5341,7 +5513,7 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+ struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -5362,8 +5534,12 @@ void skl_wm_get_hw_state(struct drm_device *dev)
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
} else {
- /* Easy/common case; just sanitize DDB now if everything off */
- memset(ddb, 0, sizeof(*ddb));
+ /*
+ * Easy/common case; just sanitize DDB now if everything is off.
+ * Keep the dbuf slice info intact.
+ */
+ memset(ddb->plane, 0, sizeof(ddb->plane));
+ memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
@@ -6572,7 +6748,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv,
@@ -6585,7 +6761,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
rps->max_freq);
}
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
@@ -6890,15 +7066,18 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int min_freq = 15;
+ const int min_freq = 15;
+ const int scaling_factor = 180;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- int scaling_factor = 180;
struct cpufreq_policy *policy;
WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
+ if (rps->max_freq <= rps->min_freq)
+ return;
+
policy = cpufreq_cpu_get(0);
if (policy) {
max_ia_freq = policy->cpuinfo.max_freq;
@@ -6918,13 +7097,12 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 /* Convert GT frequency to 50 MHz units */
- min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
- max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
- } else {
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
}
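A worked example of the conversion above, assuming hypothetical SKL values: RP frequencies are kept in 16.66 MHz units, and dividing by GEN9_FREQ_SCALER (3) re-expresses them in 50 MHz units.

/*
 * rps->min_freq = 9  -> 9 * 16.66 ~= 150 MHz  -> 9 / 3 = 3  (3 * 50 = 150)
 * rps->max_freq = 69 -> 69 * 16.66 ~= 1150 MHz -> 69 / 3 = 23 (23 * 50 = 1150)
 */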
/*
@@ -6933,10 +7111,10 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
- int diff = max_gpu_freq - gpu_freq;
+ const int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
@@ -8026,10 +8204,10 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_disable_gt_powersave(dev_priv);
- if (INTEL_GEN(dev_priv) < 11)
- gen6_reset_rps_interrupts(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_reset_rps_interrupts(dev_priv);
else
- WARN_ON_ONCE(1);
+ gen6_reset_rps_interrupts(dev_priv);
}
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
@@ -8142,8 +8320,6 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
- } else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
- /* TODO */
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rps(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
@@ -8487,6 +8663,13 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
+static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ /* This is not a Wa. Enable it to reduce sampler power */
+ I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -9013,7 +9196,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ dev_priv->display.init_clock_gating = icl_init_clock_gating;
+ else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
else if (IS_COFFEELAKE(dev_priv))
dev_priv->display.init_clock_gating = cfl_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 23175c5c4a50..db27f2faa1de 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -93,7 +93,115 @@ static void psr_aux_io_power_put(struct intel_dp *intel_dp)
intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
+{
+ u32 debug_mask, mask;
+
+ /* No PSR interrupts on VLV/CHV */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return;
+
+ mask = EDP_PSR_ERROR(TRANSCODER_EDP);
+ debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ mask |= EDP_PSR_ERROR(TRANSCODER_A) |
+ EDP_PSR_ERROR(TRANSCODER_B) |
+ EDP_PSR_ERROR(TRANSCODER_C);
+
+ debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
+ EDP_PSR_POST_EXIT(TRANSCODER_B) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
+ EDP_PSR_POST_EXIT(TRANSCODER_C) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+ }
+
+ if (debug)
+ mask |= debug_mask;
+
+ WRITE_ONCE(dev_priv->psr.debug, debug);
+ I915_WRITE(EDP_PSR_IMR, ~mask);
+}
+
+static void psr_event_print(u32 val, bool psr2_enabled)
+{
+ DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+ if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
+ DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+ if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+ DRM_DEBUG_KMS("\tPSR2 disabled\n");
+ if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
+ DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+ if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
+ DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+ if (val & PSR_EVENT_GRAPHICS_RESET)
+ DRM_DEBUG_KMS("\tGraphics reset\n");
+ if (val & PSR_EVENT_PCH_INTERRUPT)
+ DRM_DEBUG_KMS("\tPCH interrupt\n");
+ if (val & PSR_EVENT_MEMORY_UP)
+ DRM_DEBUG_KMS("\tMemory up\n");
+ if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
+ DRM_DEBUG_KMS("\tFront buffer modification\n");
+ if (val & PSR_EVENT_WD_TIMER_EXPIRE)
+ DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+ if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
+ DRM_DEBUG_KMS("\tPIPE registers updated\n");
+ if (val & PSR_EVENT_REGISTER_UPDATE)
+ DRM_DEBUG_KMS("\tRegister updated\n");
+ if (val & PSR_EVENT_HDCP_ENABLE)
+ DRM_DEBUG_KMS("\tHDCP enabled\n");
+ if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
+ DRM_DEBUG_KMS("\tKVMR session enabled\n");
+ if (val & PSR_EVENT_VBI_ENABLE)
+ DRM_DEBUG_KMS("\tVBI enabled\n");
+ if (val & PSR_EVENT_LPSP_MODE_EXIT)
+ DRM_DEBUG_KMS("\tLPSP mode exited\n");
+ if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+ DRM_DEBUG_KMS("\tPSR disabled\n");
+}
+
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+{
+ u32 transcoders = BIT(TRANSCODER_EDP);
+ enum transcoder cpu_transcoder;
+ ktime_t time_ns = ktime_get();
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ transcoders |= BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ /* FIXME: Exit PSR and link train manually when this happens. */
+ if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+ DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+ dev_priv->psr.last_entry_attempt = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
+
+ if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+ dev_priv->psr.last_exit = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
+
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
+ }
+ }
+ }
+}
+
+static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
{
uint8_t psr_caps = 0;
@@ -122,6 +230,18 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
return alpm_caps & DP_ALPM_CAP;
}
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+ u8 val = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+ val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+ else
+ DRM_ERROR("Unable to get sink synchronization latency\n");
+ return val;
+}
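A brief sketch of how the cached latency is consumed later, assuming a hypothetical sink that reports a resync latency of two frames:

/*
 * DP_SYNCHRONIZATION_LATENCY_IN_SINK reads back 2, so
 * hsw_activate_psr2() programs EDP_PSR2_FRAME_BEFORE_SU(2 + 1),
 * i.e. three full frames are sent before selective updates resume.
 */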
+
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
@@ -130,33 +250,36 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ if (intel_dp->psr_dpcd[0]) {
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
}
if (INTEL_GEN(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
- uint8_t frame_sync_cap;
+ (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ /*
+ * All panels that support PSR version 03h (PSR2 +
+ * Y-coordinate) can handle Y-coordinates in the VSC, but we
+ * are only sure they will be used when required by the
+ * panel. This way the panel can do selective updates
+ * without an AUX frame sync.
+ *
+ * To support PSR version 02h and version 03h panels without
+ * the Y-coordinate requirement, we would need to enable
+ * GTC first.
+ */
+ dev_priv->psr.sink_psr2_support =
+ intel_dp_get_y_coord_required(intel_dp);
+ DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
+ ? "supported" : "not supported");
- dev_priv->psr.sink_support = true;
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap) != 1)
- frame_sync_cap = 0;
- dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
- /* PSR2 needs frame sync as well */
- dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
- DRM_DEBUG_KMS("PSR2 %s on sink",
- dev_priv->psr.psr2_support ? "supported" : "not supported");
-
- if (dev_priv->psr.psr2_support) {
- dev_priv->psr.y_cord_support =
- intel_dp_get_y_cord_status(intel_dp);
+ if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
dev_priv->psr.alpm =
intel_dp_get_alpm_status(intel_dp);
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
}
}
}
@@ -193,21 +316,17 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct edp_vsc_psr psr_vsc;
- if (dev_priv->psr.psr2_support) {
+ if (dev_priv->psr.psr2_enabled) {
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support &&
- dev_priv->psr.y_cord_support) {
+ if (dev_priv->psr.colorimetry_support) {
psr_vsc.sdp_header.HB2 = 0x5;
psr_vsc.sdp_header.HB3 = 0x13;
- } else if (dev_priv->psr.y_cord_support) {
+ } else {
psr_vsc.sdp_header.HB2 = 0x4;
psr_vsc.sdp_header.HB3 = 0xe;
- } else {
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xc;
}
} else {
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
@@ -228,31 +347,12 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DP_AUX_CH_CTL(port);
- else
- return EDP_PSR_AUX_CTL;
-}
-
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DP_AUX_CH_DATA(port, index);
- else
- return EDP_PSR_AUX_DATA(index);
-}
-
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t aux_clock_divider;
- i915_reg_t aux_ctl_reg;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ u32 aux_clock_divider, aux_ctl;
+ int i;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -260,41 +360,47 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
- enum port port = dig_port->base.port;
- u32 aux_ctl;
- int i;
+ u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+ EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+ EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+ EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
+ for (i = 0; i < sizeof(aux_msg); i += 4)
+ I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
- /* Enable AUX frame sync at sink */
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- DP_AUX_FRAME_SYNC_ENABLE);
+ /* Start with bits set for DDI_AUX_CTL register */
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_clock_divider);
+
+ /* Select only valid bits for SRD_AUX_CTL */
+ aux_ctl &= psr_aux_mask;
+ I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+}
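For reference, this is how the five aux_msg bytes above land in the data registers, assuming intel_dp_pack_aux() packs up to four message bytes per 32-bit register, most significant byte first:

/*
 * aux_msg[] = { 0x80, 0x06, 0x00, 0x00, 0x01 }
 * i == 0: EDP_PSR_AUX_DATA(0) = 0x80060000  (four bytes packed)
 * i == 4: EDP_PSR_AUX_DATA(1) = 0x01000000  (one remaining byte)
 */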
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u8 dpcd_val = DP_PSR_ENABLE;
+
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
+ if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE);
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
-
- aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
- /* Setup AUX registers */
- for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+ if (dev_priv->psr.psr2_enabled)
+ dpcd_val |= DP_PSR_ENABLE_PSR2;
+ if (dev_priv->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
- aux_clock_divider);
- I915_WRITE(aux_ctl_reg, aux_ctl);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
@@ -396,25 +502,16 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* with the 5 or 6 idle patterns.
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val;
- uint8_t sink_latency;
-
- val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
- val |= EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE;
+ val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ val |= EDP_Y_COORDINATE_ENABLE;
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_SYNCHRONIZATION_LATENCY_IN_SINK,
- &sink_latency) == 1) {
- sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
- } else {
- sink_latency = 0;
- }
- val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -440,7 +537,7 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
*/
/* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_support)
+ if (dev_priv->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
@@ -460,7 +557,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* dynamically during PSR enable, and extracted from sink
* caps during eDP detection.
*/
- if (!dev_priv->psr.psr2_support)
+ if (!dev_priv->psr.sink_psr2_support)
return false;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
@@ -478,15 +575,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- /*
- * FIXME:enable psr2 only for y-cordinate psr2 panels
- * After gtc implementation , remove this restriction.
- */
- if (!dev_priv->psr.y_cord_support) {
- DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
- return false;
- }
-
return true;
}
@@ -568,7 +656,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->psr.psr2_support)
+ if (dev_priv->psr.psr2_enabled)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -586,14 +674,24 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 chicken;
psr_aux_io_power_get(intel_dp);
- if (dev_priv->psr.psr2_support) {
- chicken = PSR2_VSC_ENABLE_PROG_HEADER;
- if (dev_priv->psr.y_cord_support)
- chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+ /* Only HSW and BDW have PSR AUX registers that need to be set up.
+ * SKL+ use hardcoded values for PSR AUX transactions.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_psr_setup_aux(intel_dp);
+
+ if (dev_priv->psr.psr2_enabled) {
+ u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+
+ if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+ chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
+ | PSR2_ADD_VERTICAL_LINE_COUNT);
+
+ else
+ chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
I915_WRITE(EDP_PSR_DEBUG,
@@ -613,7 +711,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
I915_WRITE(EDP_PSR_DEBUG,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP);
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
}
}
@@ -644,7 +743,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
goto unlock;
}
- dev_priv->psr.psr2_support = crtc_state->has_psr2;
+ dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.setup_vsc(intel_dp, crtc_state);
@@ -714,12 +813,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
i915_reg_t psr_status;
u32 psr_status_mask;
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- 0);
-
- if (dev_priv->psr.psr2_support) {
+ if (dev_priv->psr.psr2_enabled) {
psr_status = EDP_PSR2_STATUS;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
@@ -743,7 +837,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
dev_priv->psr.active = false;
} else {
- if (dev_priv->psr.psr2_support)
+ if (dev_priv->psr.psr2_enabled)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -789,53 +883,59 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&dev_priv->psr.work);
}
-static void intel_psr_work(struct work_struct *work)
+static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_dp *intel_dp;
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ intel_dp = dev_priv->psr.enabled;
+ if (!intel_dp)
+ return false;
- /* We have to make sure PSR is ready for re-enable
- * otherwise it keeps disabled until next full enable/disable cycle.
- * PSR might take some time to get fully disabled
- * and be ready for re-enable.
- */
if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
- return;
- }
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
} else {
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ struct drm_crtc *crtc =
+ dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ reg = VLV_PSRSTAT(pipe);
+ mask = VLV_EDP_PSR_IN_TRANS;
}
+
+ mutex_unlock(&dev_priv->psr.lock);
+
+ err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ if (err)
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
+ return err == 0 && dev_priv->psr.enabled;
+}
- if (!intel_dp)
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ /*
+ * We have to make sure PSR is ready for re-enable
+ * otherwise it keeps disabled until next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (!psr_wait_for_idle(dev_priv))
goto unlock;
/*
@@ -846,7 +946,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_activate(intel_dp);
+ intel_psr_activate(dev_priv->psr.enabled);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@@ -862,11 +962,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
return;
if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- 0);
- if (dev_priv->psr.psr2_support) {
+ if (dev_priv->psr.psr2_enabled) {
val = I915_READ(EDP_PSR2_CTL);
WARN_ON(!(val & EDP_PSR2_ENABLE));
I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
@@ -957,6 +1053,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidate
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
@@ -966,7 +1063,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
*/
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct drm_crtc *crtc;
enum pipe pipe;
@@ -974,6 +1071,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
+ if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -1014,6 +1114,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
+ if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -1027,8 +1130,23 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits)
- intel_psr_exit(dev_priv);
+ if (frontbuffer_bits) {
+ if (dev_priv->psr.psr2_enabled ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_psr_exit(dev_priv);
+ } else {
+ /*
+ * Display WA #0884: all
+ * This WA, documented for BXT, can be safely applied
+ * broadly, so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling it.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(pipe), 0);
+ }
+ }
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
if (!work_busy(&dev_priv->psr.work.work))
@@ -1055,9 +1173,12 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.sink_support)
return;
- /* Per platform default: all disabled. */
- if (i915_modparams.enable_psr == -1)
+ if (i915_modparams.enable_psr == -1) {
+ i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+
+ /* Per platform default: all disabled. */
i915_modparams.enable_psr = 0;
+ }
 /* Set link_standby vs. link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1090,6 +1211,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.activate = vlv_psr_activate;
dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
} else {
+ dev_priv->psr.has_hw_tracking = true;
dev_priv->psr.enable_source = hsw_psr_enable_source;
dev_priv->psr.disable_source = hsw_psr_disable;
dev_priv->psr.enable_sink = hsw_psr_enable_sink;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d599524a759..8f19349a6055 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -36,6 +36,7 @@
#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -557,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
*/
if (request) {
struct drm_i915_private *dev_priv = request->i915;
- struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(request->ctx,
+ engine);
struct i915_hw_ppgtt *ppgtt;
if (ce->state) {
@@ -599,7 +601,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(rq);
+ ret = intel_ctx_workarounds_emit(rq);
if (ret != 0)
return ret;
@@ -617,6 +619,8 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
+ intel_whitelist_workarounds_apply(engine);
+
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -658,7 +662,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 6)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- return init_workarounds_ring(engine);
+ return 0;
}
static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
@@ -693,17 +697,17 @@ static void cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->timeline->requests, link) {
+ list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
if (!i915_request_completed(request))
dma_fence_set_error(&request->fence, -EIO);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
@@ -1062,7 +1066,6 @@ err:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
- GEM_BUG_ON(!list_empty(&ring->request_list));
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1114,19 +1117,24 @@ err:
}
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+intel_engine_create_ring(struct intel_engine_cs *engine,
+ struct i915_timeline *timeline,
+ int size)
{
struct intel_ring *ring;
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+ GEM_BUG_ON(timeline == &engine->timeline);
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ring->request_list);
+ ring->timeline = i915_timeline_get(timeline);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
@@ -1157,12 +1165,13 @@ intel_ring_free(struct intel_ring *ring)
i915_vma_close(ring->vma);
__i915_gem_object_release_unless_active(obj);
+ i915_timeline_put(ring->timeline);
kfree(ring);
}
-static int context_pin(struct i915_gem_context *ctx)
+static int context_pin(struct intel_context *ce)
{
- struct i915_vma *vma = ctx->engine[RCS].state;
+ struct i915_vma *vma = ce->state;
int ret;
/*
@@ -1253,7 +1262,7 @@ static struct intel_ring *
intel_ring_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -1275,7 +1284,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
}
if (ce->state) {
- ret = context_pin(ctx);
+ ret = context_pin(ce);
if (ret)
goto err;
@@ -1296,7 +1305,7 @@ err:
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = &ctx->engine[engine->id];
+ struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(ce->pin_count == 0);
@@ -1315,6 +1324,7 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct intel_ring *ring;
+ struct i915_timeline *timeline;
int err;
intel_engine_setup_common(engine);
@@ -1323,7 +1333,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
goto err;
- ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+ timeline = i915_timeline_create(engine->i915, engine->name);
+ if (IS_ERR(timeline)) {
+ err = PTR_ERR(timeline);
+ goto err;
+ }
+
+ ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+ i915_timeline_put(timeline);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err;
@@ -1424,7 +1441,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+ *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1515,7 +1532,7 @@ static int switch_context(struct i915_request *rq)
hw_flags = MI_FORCE_RESTORE;
}
- if (to_ctx->engine[engine->id].state &&
+ if (to_intel_context(to_ctx, engine)->state &&
(to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
GEM_BUG_ON(engine->id != RCS);
@@ -1563,7 +1580,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+ GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -1593,6 +1610,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
if (intel_ring_update_space(ring) >= bytes)
return 0;
+ GEM_BUG_ON(list_empty(&ring->request_list));
list_for_each_entry(target, &ring->request_list, ring_link) {
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
@@ -1692,17 +1710,18 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
need_wrap &= ~1;
GEM_BUG_ON(need_wrap > ring->space);
GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
/* Fill the tail with MI_NOOP */
- memset(ring->vaddr + ring->emit, 0, need_wrap);
- ring->emit = 0;
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
ring->space -= need_wrap;
+ ring->emit = 0;
}
GEM_BUG_ON(ring->emit > ring->size - bytes);
GEM_BUG_ON(ring->space < bytes);
cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
+ GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
ring->emit += bytes;
ring->space -= bytes;
@@ -1712,22 +1731,24 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
- int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- u32 *cs;
+ int num_dwords;
+ void *cs;
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
if (num_dwords == 0)
return 0;
- num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
- while (num_dwords--)
- *cs++ = MI_NOOP;
-
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
intel_ring_advance(rq, cs);
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
return 0;
}
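A note on the memset64() packing above: MI_NOOP is a single dword, so each 64-bit store emits two NOOPs, which is why num_dwords must be even (the GEM_BUG_ON(num_dwords & 1) above).

/* Two MI_NOOP dwords packed per qword (MI_NOOP is currently 0) */
u64 two_noops = (u64)MI_NOOP << 32 | MI_NOOP;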
@@ -1943,8 +1964,6 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
-
if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
@@ -2029,6 +2048,8 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+
if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
@@ -2079,7 +2100,6 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
- engine->mmio_base = BSD_RING_BASE;
engine->emit_flush = bsd_ring_flush;
if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0320c2c4cfba..010750e8ee44 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,15 +3,19 @@
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
-#include "i915_gem_timeline.h"
+#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
+#include "i915_timeline.h"
+#include "intel_gpu_commands.h"
struct drm_printer;
+struct i915_sched_attr;
#define I915_CMD_HASH_ORDER 9
@@ -84,7 +88,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
}
#define I915_MAX_SLICES 3
-#define I915_MAX_SUBSLICES 3
+#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
(INTEL_GEN(dev_priv__) == 7 ? \
@@ -125,7 +129,9 @@ struct intel_ring {
struct i915_vma *vma;
void *vaddr;
+ struct i915_timeline *timeline;
struct list_head request_list;
+ struct list_head active_link;
u32 head;
u32 tail;
@@ -330,10 +336,10 @@ struct intel_engine_cs {
u8 instance;
u32 context_size;
u32 mmio_base;
- unsigned int irq_shift;
struct intel_ring *buffer;
- struct intel_timeline *timeline;
+
+ struct i915_timeline timeline;
struct drm_i915_gem_object *default_state;
@@ -459,7 +465,8 @@ struct intel_engine_cs {
*
* Called under the struct_mutex.
*/
- void (*schedule)(struct i915_request *request, int priority);
+ void (*schedule)(struct i915_request *request,
+ const struct i915_sched_attr *attr);
/*
* Cancel all requests on the hardware, or queued for execution.
@@ -561,6 +568,7 @@ struct intel_engine_cs {
#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
unsigned int flags;
/*
@@ -591,7 +599,7 @@ struct intel_engine_cs {
/**
* @lock: Lock protecting the below fields.
*/
- spinlock_t lock;
+ seqlock_t lock;
/**
* @enabled: Reference count indicating number of listeners.
*/
@@ -620,16 +628,29 @@ struct intel_engine_cs {
} stats;
};
-static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}
-static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool __execlists_need_preempt(int prio, int last)
+{
+ return prio > max(0, last);
+}
+
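
__execlists_need_preempt() gates preemption on strictly greater priority, with the last priority clamped at 0 so that below-default work never triggers preemption on its own. A hedged illustration (the priority values are made up):

	__execlists_need_preempt(1, 0);   /* true:  higher-priority waiter */
	__execlists_need_preempt(0, 0);   /* false: equal priority */
	__execlists_need_preempt(0, -1);  /* false: last is clamped up to 0 */
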
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -637,6 +658,13 @@ execlists_set_active(struct intel_engine_execlists *execlists,
__set_bit(bit, (unsigned long *)&execlists->active);
}
+static inline bool
+execlists_set_active_once(struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
+}
+
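
execlists_set_active_once() reports whether this call performed the 0 -> 1 transition for the given bit, letting callers run one-shot side effects only on the first set. A hedged sketch of how a submission path might pair it with the execlists_user_begin() hook declared further below:

	/* Begin the user-visible busy period only on the first activation. */
	if (execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER))
		execlists_user_begin(execlists, execlists->port);
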
static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -651,6 +679,10 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
return test_bit(bit, (unsigned long *)&execlists->active);
}
+void execlists_user_begin(struct intel_engine_execlists *execlists,
+ const struct execlist_port *port);
+void execlists_user_end(struct intel_engine_execlists *execlists);
+
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
@@ -663,7 +695,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1;
}
-static inline void
+static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port)
{
@@ -674,6 +706,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
memmove(port, port + 1, m * sizeof(struct execlist_port));
memset(port + m, 0, sizeof(struct execlist_port));
+
+ return port;
}
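
Returning the shuffled head port lets callers keep consuming completions in place instead of recomputing the head each time. A hedged consumer sketch (the completion test stands in for the driver's real bookkeeping):

	/* Retire completed requests from the head of the port array. */
	while (port_isset(port) &&
	       i915_request_completed(port_request(port)))
		port = execlists_port_complete(execlists, port);
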
static inline unsigned int
@@ -736,7 +770,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+intel_engine_create_ring(struct intel_engine_cs *engine,
+ struct i915_timeline *timeline,
+ int size);
int intel_ring_pin(struct intel_ring *ring,
struct drm_i915_private *i915,
unsigned int offset_bias);
@@ -854,12 +890,9 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
* with serialising this hint with anything, so document it as
* a hint and nothing more.
*/
- return READ_ONCE(engine->timeline->seqno);
+ return READ_ONCE(engine->timeline.seqno);
}
-int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct i915_request *rq);
-
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -939,7 +972,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
-void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
@@ -1037,7 +1070,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
if (READ_ONCE(engine->stats.enabled) == 0)
return;
- spin_lock_irqsave(&engine->stats.lock, flags);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
if (engine->stats.enabled > 0) {
if (engine->stats.active++ == 0)
@@ -1045,7 +1078,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->stats.active == 0);
}
- spin_unlock_irqrestore(&engine->stats.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
static inline void intel_engine_context_out(struct intel_engine_cs *engine)
@@ -1055,7 +1088,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
if (READ_ONCE(engine->stats.enabled) == 0)
return;
- spin_lock_irqsave(&engine->stats.lock, flags);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
if (engine->stats.enabled > 0) {
ktime_t last;
@@ -1082,7 +1115,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
}
}
- spin_unlock_irqrestore(&engine->stats.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
int intel_enable_engine_stats(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 66de4b2dc8b7..53a6eaa9671a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -542,6 +542,29 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
dev_priv->csr.dc_state = val;
}
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back on when
+ * exiting that state to a shallower power state (lower in number). The HW
+ * will decide when to actually enter a given state on an on-demand basis, for
+ * instance depending on the active state of display pipes. The state of
+ * display registers backed by affected power rails is saved/restored as
+ * needed.
+ *
+ * Based on the above, enabling a deeper DC power state is asynchronous wrt.
+ * entering it. Disabling a deeper power state is synchronous: for instance
+ * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
+ * back on and register state is restored. This is guaranteed by the MMIO write
+ * to DC_STATE_EN blocking until the state is restored.
+ */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
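
Given the synchronous-disable semantics documented above, a hedged usage sketch (gen9_set_dc_state() is static, so only callers within intel_runtime_pm.c apply):

	/* Blocks until all HW resources are back on and state is restored. */
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/* ...now safe to touch registers backed by the DC5/DC6 power rails. */
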
@@ -635,7 +658,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -649,13 +672,6 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
-void skl_disable_dc6(struct drm_i915_private *dev_priv)
-{
- DRM_DEBUG_KMS("Disabling DC6\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-}
-
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -2626,32 +2642,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ u32 val, status;
+ val = I915_READ(reg);
+ val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
udelay(10);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
+ status = I915_READ(reg) & DBUF_POWER_STATE;
+ if ((enable && !status) || (!enable && status)) {
+ DRM_ERROR("DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
+ return false;
+ }
+ return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
- udelay(10);
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return 1;
+ return 2;
+}
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout!\n");
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
+{
+ u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ bool ret;
+
+ if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+ DRM_ERROR("Invalid number of dbuf slices requested\n");
+ return;
+ }
+
+ if (req_slices == hw_enabled_slices || req_slices == 0)
+ return;
+
+ if (req_slices > hw_enabled_slices)
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+ else
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+ if (ret)
+ dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
}
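
icl_dbuf_slices_update() builds on the request/state handshake in intel_dbuf_slice_set(): write DBUF_POWER_REQUEST, post the write, then poll DBUF_POWER_STATE. A hedged caller sketch from watermark code that needs the second slice:

	/* Request both DBuf slices; a no-op if the HW already matches. */
	icl_dbuf_slices_update(dev_priv, 2);
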
-/*
- * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
- * needed and keep it disabled as much as possible.
- */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
@@ -2663,6 +2716,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
!(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
+ else
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -2676,6 +2731,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power disable timeout!\n");
+ else
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 96e213ec202d..26975df4e593 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/*
* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
@@ -2779,9 +2785,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
return false;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
- drm_property_add_enum(
- intel_sdvo_connector->tv_format, i,
- i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+ drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+ tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbdcf85032df..ee23613f9fd4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,6 +48,7 @@ bool intel_format_is_yuv(u32 format)
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_NV12:
return true;
default:
return false;
@@ -130,7 +131,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (scanline < min || scanline > max)
break;
- if (timeout <= 0) {
+ if (!timeout) {
DRM_ERROR("Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
@@ -935,20 +936,11 @@ intel_check_sprite_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = state->base.fb;
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- struct drm_rect *src = &state->base.src;
- struct drm_rect *dst = &state->base.dst;
- struct drm_rect clip = {};
int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
- int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
int ret;
-
- *src = drm_plane_state_src(&state->base);
- *dst = drm_plane_state_dest(&state->base);
+ uint32_t pixel_format = 0;
if (!fb) {
state->base.visible = false;
@@ -969,11 +961,14 @@ intel_check_sprite_plane(struct intel_plane *plane,
/* setup can_scale, min_scale, max_scale */
if (INTEL_GEN(dev_priv) >= 9) {
+ if (state->base.fb)
+ pixel_format = state->base.fb->format->format;
/* use scaler when colorkey is not required */
if (!state->ckey.flags) {
can_scale = 1;
min_scale = 1;
- max_scale = skl_max_scale(crtc, crtc_state);
+ max_scale =
+ skl_max_scale(crtc, crtc_state, pixel_format);
} else {
can_scale = 0;
min_scale = DRM_PLANE_HELPER_NO_SCALING;
@@ -985,64 +980,19 @@ intel_check_sprite_plane(struct intel_plane *plane,
min_scale = plane->can_scale ? 1 : (1 << 16);
}
- /*
- * FIXME the following code does a bunch of fuzzy adjustments to the
- * coordinates and sizes. We probably need some way to decide whether
- * more strict checking should be done instead.
- */
- drm_rect_rotate(src, fb->width << 16, fb->height << 16,
- state->base.rotation);
-
- hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
- BUG_ON(hscale < 0);
-
- vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
- BUG_ON(vscale < 0);
-
- if (crtc_state->base.enable)
- drm_mode_get_hv_timing(&crtc_state->base.mode,
- &clip.x2, &clip.y2);
-
- state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
-
- crtc_x = dst->x1;
- crtc_y = dst->y1;
- crtc_w = drm_rect_width(dst);
- crtc_h = drm_rect_height(dst);
+ ret = drm_atomic_helper_check_plane_state(&state->base,
+ &crtc_state->base,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
if (state->base.visible) {
- /* check again in case clipping clamped the results */
- hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
- if (hscale < 0) {
- DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
- drm_rect_debug_print("src: ", src, true);
- drm_rect_debug_print("dst: ", dst, false);
-
- return hscale;
- }
-
- vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
- if (vscale < 0) {
- DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
- drm_rect_debug_print("src: ", src, true);
- drm_rect_debug_print("dst: ", dst, false);
-
- return vscale;
- }
-
- /* Make the source viewport size an exact multiple of the scaling factors. */
- drm_rect_adjust_size(src,
- drm_rect_width(dst) * hscale - drm_rect_width(src),
- drm_rect_height(dst) * vscale - drm_rect_height(src));
-
- drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
- state->base.rotation);
-
- /* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src->x1 < (int) state->base.src_x ||
- src->y1 < (int) state->base.src_y ||
- src->x2 > (int) state->base.src_x + state->base.src_w ||
- src->y2 > (int) state->base.src_y + state->base.src_h);
+ struct drm_rect *src = &state->base.src;
+ struct drm_rect *dst = &state->base.dst;
+ unsigned int crtc_w = drm_rect_width(dst);
+ unsigned int crtc_h = drm_rect_height(dst);
+ uint32_t src_x, src_y, src_w, src_h;
/*
* Hardware doesn't handle subpixel coordinates.
@@ -1055,58 +1005,40 @@ intel_check_sprite_plane(struct intel_plane *plane,
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- if (intel_format_is_yuv(fb->format->format)) {
- src_x &= ~1;
- src_w &= ~1;
-
- /*
- * Must keep src and dst the
- * same if we can't scale.
- */
- if (!can_scale)
- crtc_w &= ~1;
+ src->x1 = src_x << 16;
+ src->x2 = (src_x + src_w) << 16;
+ src->y1 = src_y << 16;
+ src->y2 = (src_y + src_h) << 16;
- if (crtc_w == 0)
- state->base.visible = false;
+ if (intel_format_is_yuv(fb->format->format) &&
+ fb->format->format != DRM_FORMAT_NV12 &&
+ (src_x % 2 || src_w % 2)) {
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
+ src_x, src_w);
+ return -EINVAL;
}
- }
-
- /* Check size restrictions when scaling */
- if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
- unsigned int width_bytes;
- int cpp = fb->format->cpp[0];
- WARN_ON(!can_scale);
+ /* Check size restrictions when scaling */
+ if (src_w != crtc_w || src_h != crtc_h) {
+ unsigned int width_bytes;
+ int cpp = fb->format->cpp[0];
- /* FIXME interlacing min height is 6 */
+ WARN_ON(!can_scale);
- if (crtc_w < 3 || crtc_h < 3)
- state->base.visible = false;
+ width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
- if (src_w < 3 || src_h < 3)
- state->base.visible = false;
-
- width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
-
- if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
- width_bytes > 4096 || fb->pitches[0] > 4096)) {
- DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
- return -EINVAL;
+ /* FIXME interlacing min height is 6 */
+ if (INTEL_GEN(dev_priv) < 9 && (
+ src_w < 3 || src_h < 3 ||
+ src_w > 2048 || src_h > 2048 ||
+ crtc_w < 3 || crtc_h < 3 ||
+ width_bytes > 4096 || fb->pitches[0] > 4096)) {
+ DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+ return -EINVAL;
+ }
}
}
- if (state->base.visible) {
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
- }
-
- dst->x1 = crtc_x;
- dst->x2 = crtc_x + crtc_w;
- dst->y1 = crtc_y;
- dst->y2 = crtc_y + crtc_h;
-
if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(crtc_state, state);
if (ret)
@@ -1248,6 +1180,19 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static uint32_t skl_planar_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+};
+
static const uint64_t skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
@@ -1342,6 +1287,7 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
@@ -1441,8 +1387,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ if (skl_plane_has_planar(dev_priv, pipe,
+ PLANE_SPRITE0 + plane)) {
+ plane_formats = skl_planar_formats;
+ num_plane_formats = ARRAY_SIZE(skl_planar_formats);
+ } else {
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ }
if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
modifiers = skl_plane_format_modifiers_ccs;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc3809f7f..b55b5c157e38 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
if (!tv_mode)
return false;
- pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has its own notion of sync and other mode flags, so clear them. */
- pipe_config->base.adjusted_mode.flags = 0;
+ adjusted_mode->flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e5bf0d37bf43..1cffaf7b5dbe 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -69,13 +69,15 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
{
- int guc_log_level = 0; /* disabled */
+ int guc_log_level;
- /* Enable if we're running on platform with GuC and debug config */
- if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() &&
- (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
- IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)))
- guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
+ if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+ guc_log_level = GUC_LOG_LEVEL_DISABLED;
+ else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ guc_log_level = GUC_LOG_LEVEL_MAX;
+ else
+ guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
/* Any platform specific fine-tuning can be done here */
@@ -83,7 +85,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
}
/**
- * intel_uc_sanitize_options - sanitize uC related modparam options
+ * sanitize_options_early - sanitize uC related modparam options
* @dev_priv: device private
*
* In the case of the "enable_guc" option, this function will attempt to modify
@@ -99,7 +101,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
* unless GuC is enabled on the given platform and the driver is compiled with
* a debug config, in which case this modparam will default to "enable(1..4)".
*/
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
@@ -142,51 +144,53 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
i915_modparams.guc_log_level = 0;
}
- if (i915_modparams.guc_log_level > 1 + GUC_LOG_VERBOSITY_MAX) {
+ if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
"verbosity too high");
- i915_modparams.guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
+ i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
}
- DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s verbosity:%d)\n",
+ DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
i915_modparams.guc_log_level,
yesno(i915_modparams.guc_log_level),
- i915_modparams.guc_log_level - 1);
+ yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
/* Make sure that sanitization was done */
GEM_BUG_ON(i915_modparams.enable_guc < 0);
GEM_BUG_ON(i915_modparams.guc_log_level < 0);
}
-void intel_uc_init_early(struct drm_i915_private *dev_priv)
+void intel_uc_init_early(struct drm_i915_private *i915)
{
- intel_guc_init_early(&dev_priv->guc);
- intel_huc_init_early(&dev_priv->huc);
-}
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
-void intel_uc_init_fw(struct drm_i915_private *dev_priv)
-{
- if (!USES_GUC(dev_priv))
- return;
+ intel_guc_init_early(guc);
+ intel_huc_init_early(huc);
- if (USES_HUC(dev_priv))
- intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ sanitize_options_early(i915);
- intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
+ if (USES_GUC(i915))
+ intel_uc_fw_fetch(i915, &guc->fw);
+
+ if (USES_HUC(i915))
+ intel_uc_fw_fetch(i915, &huc->fw);
}
-void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
+void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
- if (!USES_GUC(dev_priv))
- return;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
- intel_uc_fw_fini(&dev_priv->guc.fw);
+ if (USES_HUC(i915))
+ intel_uc_fw_fini(&huc->fw);
- if (USES_HUC(dev_priv))
- intel_uc_fw_fini(&dev_priv->huc.fw);
+ if (USES_GUC(i915))
+ intel_uc_fw_fini(&guc->fw);
- guc_free_load_err_log(&dev_priv->guc);
+ guc_free_load_err_log(guc);
}
/**
@@ -223,10 +227,13 @@ static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ gen9_enable_guc_interrupts(dev_priv);
+
if (HAS_GUC_CT(dev_priv))
- return intel_guc_enable_ct(guc);
+ return intel_guc_ct_enable(&guc->ct);
guc->send = intel_guc_send_mmio;
+ guc->handler = intel_guc_to_host_event_handler_mmio;
return 0;
}
@@ -235,9 +242,12 @@ static void guc_disable_communication(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
if (HAS_GUC_CT(dev_priv))
- intel_guc_disable_ct(guc);
+ intel_guc_ct_disable(&guc->ct);
+
+ gen9_disable_guc_interrupts(dev_priv);
guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
}
int intel_uc_init_misc(struct drm_i915_private *dev_priv)
@@ -248,24 +258,13 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv)
if (!USES_GUC(dev_priv))
return 0;
- ret = intel_guc_init_wq(guc);
- if (ret) {
- DRM_ERROR("Couldn't allocate workqueues for GuC\n");
- goto err;
- }
+ intel_guc_init_ggtt_pin_bias(guc);
- ret = intel_guc_log_relay_create(guc);
- if (ret) {
- DRM_ERROR("Couldn't allocate relay for GuC log\n");
- goto err_relay;
- }
+ ret = intel_guc_init_wq(guc);
+ if (ret)
+ return ret;
return 0;
-
-err_relay:
- intel_guc_fini_wq(guc);
-err:
- return ret;
}
void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
@@ -276,8 +275,6 @@ void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
return;
intel_guc_fini_wq(guc);
-
- intel_guc_log_relay_destroy(guc);
}
int intel_uc_init(struct drm_i915_private *dev_priv)
@@ -325,6 +322,24 @@ void intel_uc_fini(struct drm_i915_private *dev_priv)
intel_guc_fini(guc);
}
+void intel_uc_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
+
+ if (!USES_GUC(i915))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(i915));
+
+ guc_disable_communication(guc);
+
+ intel_huc_sanitize(huc);
+ intel_guc_sanitize(guc);
+
+ __intel_uc_reset_hw(i915);
+}
+
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
@@ -336,14 +351,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!HAS_GUC(dev_priv));
- guc_disable_communication(guc);
gen9_reset_guc_interrupts(dev_priv);
- /* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
- I915_WRITE(DMA_GUC_WOPCM_OFFSET,
- GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);
-
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
if (IS_GEN9(dev_priv))
@@ -390,12 +399,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
}
if (USES_GUC_SUBMISSION(dev_priv)) {
- if (i915_modparams.guc_log_level)
- gen9_enable_guc_interrupts(dev_priv);
-
ret = intel_guc_submission_enable(guc);
if (ret)
- goto err_interrupts;
+ goto err_communication;
}
dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
@@ -410,8 +416,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
/*
* We've failed to load the firmware :(
*/
-err_interrupts:
- gen9_disable_guc_interrupts(dev_priv);
err_communication:
guc_disable_communication(guc);
err_log_capture:
@@ -441,9 +445,6 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
-
- if (USES_GUC_SUBMISSION(dev_priv))
- gen9_disable_guc_interrupts(dev_priv);
}
int intel_uc_suspend(struct drm_i915_private *i915)
@@ -479,8 +480,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
- if (i915_modparams.guc_log_level)
- gen9_enable_guc_interrupts(i915);
+ gen9_enable_guc_interrupts(i915);
err = intel_guc_resume(guc);
if (err) {
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index f76d51d1ce70..25d73ada74ae 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -28,13 +28,12 @@
#include "intel_huc.h"
#include "i915_params.h"
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-void intel_uc_init_fw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_misc(struct drm_i915_private *dev_priv);
void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
+void intel_uc_sanitize(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 3ec0ce505b76..6e8e0b546743 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -95,15 +95,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
- /* Header and uCode will be loaded to WOPCM */
- size = uc_fw->header_size + uc_fw->ucode_size;
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
- intel_uc_fw_type_repr(uc_fw->type));
- err = -E2BIG;
- goto fail;
- }
-
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
@@ -209,6 +200,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
struct i915_vma *vma))
{
struct i915_vma *vma;
+ u32 ggtt_pin_bias;
int err;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -230,8 +222,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
goto fail;
}
+ ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ PIN_OFFSET_BIAS | ggtt_pin_bias);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index d5fd4609c785..87910aa83267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -30,7 +30,7 @@ struct drm_i915_private;
struct i915_vma;
/* Home of GuC, HuC and DMC firmwares */
-#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_FAIL = -1,
@@ -115,6 +115,28 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
return uc_fw->path != NULL;
}
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+ if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+ uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+}
+
+/**
+ * intel_uc_fw_get_upload_size() - Get the size of the firmware to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ return uc_fw->header_size + uc_fw->ucode_size;
+}
+
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4df7c2ef8576..448293eb638d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,6 +62,11 @@ static inline void
fw_domain_reset(struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
+ /*
+ * We don't really know whether the powerwell for the forcewake domain we
+ * are trying to reset here actually exists at this point (engines could
+ * be fused off in ICL+), so don't wait for the acks.
+ */
__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
@@ -134,7 +139,9 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
* in the hope that the original ack will be delivered along with
* the fallback ack.
*
- * This workaround is described in HSDES #1604254524
+ * This workaround is described in HSDES #1604254524 and it's known as:
+ * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
+ * although the name is a bit misleading.
*/
pass = 1;
@@ -1353,6 +1360,23 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
fw_domain_reset(dev_priv, d);
}
+static void fw_domain_fini(struct drm_i915_private *dev_priv,
+ enum forcewake_domain_id domain_id)
+{
+ struct intel_uncore_forcewake_domain *d;
+
+ if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+ return;
+
+ d = &dev_priv->uncore.fw_domain[domain_id];
+
+ WARN_ON(d->wake_count);
+ WARN_ON(hrtimer_cancel(&d->timer));
+ memset(d, 0, sizeof(*d));
+
+ dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+}
+
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
@@ -1372,7 +1396,8 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 11) {
int i;
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1565,6 +1590,40 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
&dev_priv->uncore.pmic_bus_access_nb);
}
+/*
+ * We might have detected that some engines are fused off after we initialized
+ * the forcewake domains. Prune them, to make sure they only reference existing
+ * engines.
+ */
+void intel_uncore_prune(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 11) {
+ enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+ enum forcewake_domain_id domain_id;
+ int i;
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ continue;
+
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(dev_priv, domain_id);
+ }
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+
+ if (HAS_ENGINE(dev_priv, _VECS(i)))
+ continue;
+
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(dev_priv, domain_id);
+ }
+ }
+}
+
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
/* Paranoia: make sure we have disabled everything before we exit. */
@@ -1646,11 +1705,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
const i915_reg_t mode = RING_MI_MODE(base);
I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register_fw(dev_priv,
- mode,
- MODE_IDLE,
- MODE_IDLE,
- 500))
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 500, 0,
+ NULL))
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
engine->name);
@@ -1804,9 +1862,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
- err = intel_wait_for_register_fw(dev_priv,
- GEN6_GDRST, hw_domain_mask, 0,
- 500);
+ err = __intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500, 0,
+ NULL);
if (err)
DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
@@ -1854,6 +1913,50 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
}
/**
+ * gen11_reset_engines - reset individual engines
+ * @dev_priv: i915 device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
+ *
+ * Note: It is the responsibility of the caller to handle the difference between
+ * asking full domain reset versus reset for all available individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen11_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
+{
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN11_GRDOM_RENDER,
+ [BCS] = GEN11_GRDOM_BLT,
+ [VCS] = GEN11_GRDOM_MEDIA,
+ [VCS2] = GEN11_GRDOM_MEDIA2,
+ [VCS3] = GEN11_GRDOM_MEDIA3,
+ [VCS4] = GEN11_GRDOM_MEDIA4,
+ [VECS] = GEN11_GRDOM_VECS,
+ [VECS2] = GEN11_GRDOM_VECS2,
+ };
+ u32 hw_mask;
+
+ BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN11_GRDOM_FULL;
+ } else {
+ unsigned int tmp;
+
+ hw_mask = 0;
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ return gen6_hw_domain_reset(dev_priv, hw_mask);
+}
+
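
Mapping engine ids to GEN11_GRDOM_* bits lets a caller reset an arbitrary subset of engines with a single GEN6_GDRST write. A hedged example resetting only the first video decode engine:

	/* Resolves to gen6_hw_domain_reset(dev_priv, GEN11_GRDOM_MEDIA). */
	gen11_reset_engines(dev_priv, BIT(VCS));
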
+/**
* __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
@@ -1940,7 +2043,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
u32 reg_value;
int ret;
- might_sleep();
+ might_sleep_if(slow_timeout_ms);
spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, fw);
@@ -1952,7 +2055,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irq(&dev_priv->uncore.lock);
- if (ret)
+ if (ret && slow_timeout_ms)
ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
@@ -1971,11 +2074,12 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
- ret = intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700);
+ ret = __intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700, 0,
+ NULL);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
@@ -2000,7 +2104,10 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
if (gen8_reset_engine_start(engine))
goto not_ready;
- return gen6_reset_engines(dev_priv, engine_mask);
+ if (INTEL_GEN(dev_priv) >= 11)
+ return gen11_reset_engines(dev_priv, engine_mask);
+ else
+ return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
@@ -2038,15 +2145,31 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
int retry;
int ret;
- might_sleep();
+ /*
+ * We want to perform per-engine reset from atomic context (e.g.
+ * softirq), which imposes the constraint that we cannot sleep.
+ * However, experience suggests that spending a bit of time waiting
+ * for a reset helps in various cases, so for a full-device reset
+ * we apply the opposite rule and wait if we want to. As we should
+ * always follow up a failed per-engine reset with a full device reset,
+ * being a little faster, stricter and more error prone for the
+ * atomic case seems an acceptable compromise.
+ *
+ * Unfortunately this leads to a bimodal routine, when the goal was
+ * to have a single reset function that worked for resetting any
+ * number of engines simultaneously.
+ */
+ might_sleep_if(engine_mask == ALL_ENGINES);
- /* If the power well sleeps during the reset, the reset
+ /*
+ * If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
for (retry = 0; retry < 3; retry++) {
- /* We stop engines, otherwise we might get failed reset and a
+ /*
+ * We stop engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
@@ -2060,9 +2183,11 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
i915_stop_engines(dev_priv, engine_mask);
ret = -ENODEV;
- if (reset)
+ if (reset) {
+ GEM_TRACE("engine_mask=%x\n", engine_mask);
ret = reset(dev_priv, engine_mask);
- if (ret != -ETIMEDOUT)
+ }
+ if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
break;
cond_resched();
@@ -2085,12 +2210,14 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
int intel_reset_guc(struct drm_i915_private *dev_priv)
{
+ u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
+ GEN9_GRDOM_GUC;
int ret;
GEM_BUG_ON(!HAS_GUC(dev_priv));
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+ ret = gen6_hw_domain_reset(dev_priv, guc_domain);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dfdf444e4bcc..47478d609630 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -140,6 +140,7 @@ struct intel_uncore {
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
void intel_uncore_init(struct drm_i915_private *dev_priv);
+void intel_uncore_prune(struct drm_i915_private *dev_priv);
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
void intel_uncore_fini(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
new file mode 100644
index 000000000000..74bf76f3fddc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -0,0 +1,275 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include "intel_wopcm.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: WOPCM Layout
+ *
+ * The layout of the WOPCM will be fixed after writing to GuC WOPCM size and
+ * offset registers whose values are calculated and determined by HuC/GuC
+ * firmware size and set of hardware requirements/restrictions as shown below:
+ *
+ * ::
+ *
+ * +=========> +====================+ <== WOPCM Top
+ * ^ | HW contexts RSVD |
+ * | +===> +====================+ <== GuC WOPCM Top
+ * | ^ | |
+ * | | | |
+ * | | | |
+ * | GuC | |
+ * | WOPCM | |
+ * | Size +--------------------+
+ * WOPCM | | GuC FW RSVD |
+ * | | +--------------------+
+ * | | | GuC Stack RSVD |
+ * | | +--------------------+
+ * | v | GuC WOPCM RSVD |
+ * | +===> +====================+ <== GuC WOPCM base
+ * | | WOPCM RSVD |
+ * | +--------------------+ <== HuC Firmware Top
+ * v | HuC FW |
+ * +=========> +====================+ <== WOPCM Base
+ *
+ * GuC accessible WOPCM starts at GuC WOPCM base and ends at GuC WOPCM top.
+ * The top part of the WOPCM is reserved for hardware contexts (e.g. RC6
+ * context).
+ */
+
+/* Default WOPCM size 1MB. */
+#define GEN9_WOPCM_SIZE (1024 * 1024)
+/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
+#define WOPCM_RESERVED_SIZE (16 * 1024)
+
+/* 16KB reserved at the beginning of GuC WOPCM. */
+#define GUC_WOPCM_RESERVED (16 * 1024)
+/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
+#define GUC_WOPCM_STACK_RESERVED (8 * 1024)
+
+/* GuC WOPCM Offset value needs to be aligned to 16KB. */
+#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
+
+/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
+#define BXT_WOPCM_RC6_CTX_RESERVED (24 * 1024)
+/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
+#define CNL_WOPCM_HW_CTX_RESERVED (36 * 1024)
+
+/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
+#define GEN9_GUC_FW_RESERVED (128 * 1024)
+#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+
+/**
+ * intel_wopcm_init_early() - Early initialization of the WOPCM.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Set up the size of the WOPCM, which will be used later on for WOPCM
+ * partitioning.
+ */
+void intel_wopcm_init_early(struct intel_wopcm *wopcm)
+{
+ wopcm->size = GEN9_WOPCM_SIZE;
+
+ DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+}
+
+static inline u32 context_reserved_size(struct drm_i915_private *i915)
+{
+ if (IS_GEN9_LP(i915))
+ return BXT_WOPCM_RC6_CTX_RESERVED;
+ else if (INTEL_GEN(i915) >= 10)
+ return CNL_WOPCM_HW_CTX_RESERVED;
+ else
+ return 0;
+}
+
+static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+{
+ u32 offset;
+
+ /*
+ * The GuC WOPCM size shall be at least a dword larger than the offset from
+ * the WOPCM base (GuC WOPCM offset from WOPCM base + GEN9_GUC_WOPCM_OFFSET)
+ * due to a hardware limitation on Gen9.
+ */
+ offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
+ if (offset > guc_wopcm_size ||
+ (guc_wopcm_size - offset) < sizeof(u32)) {
+ DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
+ guc_wopcm_size / 1024,
+ (u32)(offset + sizeof(u32)) / 1024);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+{
+ /*
+ * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
+ * size to be larger than or equal to HuC firmware size. Otherwise,
+ * firmware uploading would fail.
+ */
+ if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
+ DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
+ huc_fw_size / 1024,
+ (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static inline int check_hw_restriction(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
+{
+ int err = 0;
+
+ if (IS_GEN9(i915))
+ err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+
+ if (!err &&
+ (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+ err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+
+ return err;
+}
+
+/**
+ * intel_wopcm_init() - Initialize the WOPCM structure.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * This function will partition the WOPCM space based on the GuC and HuC
+ * firmware sizes and will allocate the maximum remaining space for use by
+ * GuC. It will also enforce platform-dependent hardware restrictions on the
+ * GuC WOPCM offset and size. It will fail the WOPCM init if any of these
+ * checks fail, so that the subsequent GuC firmware upload is aborted.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_wopcm_init(struct intel_wopcm *wopcm)
+{
+ struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+ u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
+ u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+ u32 ctx_rsvd = context_reserved_size(i915);
+ u32 guc_wopcm_base;
+ u32 guc_wopcm_size;
+ u32 guc_wopcm_rsvd;
+ int err;
+
+ GEM_BUG_ON(!wopcm->size);
+
+ if (guc_fw_size >= wopcm->size) {
+ DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
+ guc_fw_size / 1024);
+ return -E2BIG;
+ }
+
+ if (huc_fw_size >= wopcm->size) {
+ DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.",
+ huc_fw_size / 1024);
+ return -E2BIG;
+ }
+
+ guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
+ GUC_WOPCM_OFFSET_ALIGNMENT);
+ if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
+ DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
+ guc_wopcm_base / 1024);
+ return -E2BIG;
+ }
+
+ guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+ guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
+
+ DRM_DEBUG_DRIVER("Calculated GuC WOPCM Region: [%uKiB, %uKiB)\n",
+ guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+
+ guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+ if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
+ DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
+ (guc_fw_size + guc_wopcm_rsvd) / 1024,
+ guc_wopcm_size / 1024);
+ return -E2BIG;
+ }
+
+ err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
+ huc_fw_size);
+ if (err)
+ return err;
+
+ wopcm->guc.base = guc_wopcm_base;
+ wopcm->guc.size = guc_wopcm_size;
+
+ return 0;
+}
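
To make the partitioning above concrete, a worked example with hypothetical firmware sizes on a Gen9 BXT part (all numbers below are made up for illustration; the masking granularity of GUC_WOPCM_SIZE_MASK is assumed not to truncate here):

	/*
	 * Assume: wopcm->size = 1024 KiB, huc_fw_size = 208 KiB,
	 * guc_fw_size = 120 KiB, ctx_rsvd = 24 KiB (BXT).
	 *
	 * guc_wopcm_base = ALIGN(208K + 16K, 16K)     = 224 KiB
	 * guc_wopcm_size = 1024K - 224K - 24K         = 776 KiB (pre-mask)
	 * needed         = 120K + (16K + 8K) reserved = 144 KiB <= 776 KiB, OK
	 */
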
+
+static inline int write_and_verify(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, u32 val, u32 mask,
+ u32 locked_bit)
+{
+ u32 reg_val;
+
+ GEM_BUG_ON(val & ~mask);
+
+ I915_WRITE(reg, val);
+
+ reg_val = I915_READ(reg);
+
+ return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
+}
+
+/**
+ * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Set up the GuC WOPCM size and offset registers with the calculated values,
+ * and verify that the registers are locked with the correct values.
+ *
+ * Return: 0 on success. -EIO if registers were locked with incorrect values.
+ */
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
+{
+ struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+ u32 huc_agent;
+ u32 mask;
+ int err;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!wopcm->guc.size);
+ GEM_BUG_ON(!wopcm->guc.base);
+
+ err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+ GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
+ GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
+ wopcm->guc.base | huc_agent, mask,
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ DRM_ERROR("Failed to init WOPCM registers:\n");
+ DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
+ I915_READ(DMA_GUC_WOPCM_OFFSET));
+ DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
new file mode 100644
index 000000000000..6298910a384c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_WOPCM_H_
+#define _INTEL_WOPCM_H_
+
+#include <linux/types.h>
+
+/**
+ * struct intel_wopcm - Overall WOPCM info and WOPCM regions.
+ * @size: Size of overall WOPCM.
+ * @guc: GuC WOPCM Region info.
+ * @guc.base: GuC WOPCM base which is offset from WOPCM base.
+ * @guc.size: Size of the GuC WOPCM region.
+ */
+struct intel_wopcm {
+ u32 size;
+ struct {
+ u32 base;
+ u32 size;
+ } guc;
+};
+
+void intel_wopcm_init_early(struct intel_wopcm *wopcm);
+int intel_wopcm_init(struct intel_wopcm *wopcm);
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
+
+#endif
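
The three entry points above are meant to run in driver-load order. A hedged sketch of the expected sequence (error handling elided; the dev_priv->wopcm member name is an assumption of this sketch):

	intel_wopcm_init_early(&dev_priv->wopcm);    /* before uC fw fetch */
	err = intel_wopcm_init(&dev_priv->wopcm);    /* once fw sizes are known */
	err = intel_wopcm_init_hw(&dev_priv->wopcm); /* during GEM HW init */
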
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
new file mode 100644
index 000000000000..2df3538ceba5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -0,0 +1,949 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_workarounds.h"
+
+/**
+ * DOC: Hardware workarounds
+ *
+ * This file is intended as a central place to implement most [1]_ of the
+ * required workarounds for hardware to work as originally intended. They fall
+ * into five basic categories depending on how/when they are applied:
+ *
+ * - Workarounds that touch registers that are saved/restored to/from the HW
+ * context image. The list is emitted (via Load Register Immediate commands)
+ * every time a new context is created.
+ * - GT workarounds. The list of these WAs is applied whenever these registers
+ * revert to default values (on GPU reset, suspend/resume [2]_, etc.).
+ * - Display workarounds. The list is applied during display clock-gating
+ * initialization.
+ * - Workarounds that whitelist a privileged register, so that UMDs can manage
+ * them directly. This is just a special case of an MMIO workaround (as we
+ * write the list of these to-be-whitelisted registers to some special HW
+ * registers).
+ * - Workaround batchbuffers, which get executed automatically by the hardware
+ * on every HW context restore.
+ *
+ * .. [1] Please note that there are other WAs that, due to their nature,
+ * cannot be applied from a central place. Those are peppered around the rest
+ * of the code, as needed.
+ *
+ * .. [2] Technically, some registers are power-context saved & restored, so
+ * they survive a suspend/resume. In practice, writing them again is not too
+ * costly and simplifies things. We can revisit this in the future.
+ *
+ * Layout
+ * ''''''
+ *
+ * Keep things in this file ordered by WA type, as per the above (context, GT,
+ * display, register whitelist, batchbuffer). Then, inside each type, keep the
+ * following order:
+ *
+ * - Infrastructure functions and macros
+ * - WAs per platform in standard gen/chrono order
+ * - Public functions to init or apply the given workaround type.
+ */
+
+static int wa_add(struct drm_i915_private *dev_priv,
+ i915_reg_t addr,
+ const u32 mask, const u32 val)
+{
+ const unsigned int idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) do { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ } while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
+
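
As an illustrative aside (not part of the patch itself), the _MASKED_* helpers these macros rely on target self-masking registers, where the upper 16 bits of the written value act as a per-bit write enable. Assuming the usual i915 definitions, WA_SET_BIT_MASKED(addr, BIT(3)) reduces to roughly:

    /* _MASKED_BIT_ENABLE(b) repeats the bit in the write-enable half */
    wa_add(dev_priv, addr, BIT(3), (BIT(3) << 16) | BIT(3));

    /* WA_CLR_BIT_MASKED(addr, BIT(3)) instead records (BIT(3) << 16) | 0,
     * i.e. _MASKED_BIT_DISABLE: enable the write, clear the bit.
     */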
+static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw,chv */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+ /* WaDisablePartialInstShootdown:bdw,chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:bdw,chv */
+ /* WaHdcDisableFetchWhenMasked:bdw,chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
+
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for BDW and CHV; turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw,chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
+ return 0;
+}
+
+static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen8_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableDopClockGating:bdw
+ *
+ * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * to disable EUTC clock gating.
+ */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
+
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+ return 0;
+}
+
+static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen8_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+ return 0;
+}
+
+static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ if (HAS_LLC(dev_priv)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ }
+
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+ if (!IS_COFFEELAKE(dev_priv))
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
+
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been a source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv))
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+ /*
+ * Supporting preemption with fine-granularity requires changes in the
+ * batch buffer programming. Since we can't break old userspace, we
+ * need to set our default preemption level to safe value. Userspace is
+ * still able to use more fine-grained preemption levels, since in
+ * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+ * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+ * not real HW workarounds, but merely a way to start using preemption
+ * while maintaining old contract with userspace.
+ */
+
+ /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+ /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+ /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+ if (IS_GEN9_LP(dev_priv))
+ WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+{
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
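
A worked example of the tuning math above, with a made-up fuse value:

    /* Hypothetical readout: slice 0 has exactly one 7-EU subslice, index 2 */
    u8 mask = 0x4;            /* 0b0100: is_power_of_2() passes */
    u8 ss = ffs(mask) - 1;    /* ss = 2 */
    u8 val = 3 - ss;          /* vals[0] = 1 */

A slice whose mask is zero or has several bits set is skipped, leaving its vals[] entry at the default of 0.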
+
+static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen9_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ return skl_tune_iz_hashing(dev_priv);
+}
+
+static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen9_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:bxt */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen9_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ return 0;
+}
+
+static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen9_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaToEnableHwFixForPushConstHWBug:glk */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = gen9_ctx_workarounds_init(dev_priv);
+ if (ret)
+ return ret;
+
+ /* WaToEnableHwFixForPushConstHWBug:cfl */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableSbeCacheDispatchPortSharing:cfl */
+ WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ return 0;
+}
+
+static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ /* WaForceContextSaveRestoreNonCoherent:cnl */
+ WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
+ /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
+
+ /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
+
+ /* WaPushConstantDereferenceHoldDisable:cnl */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+
+ /* FtrEnableFastAnisoL1BankingFix:cnl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+
+ /* WaDisable3DMidCmdPreemption:cnl */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+ /* WaDisableGPGPUMidCmdPreemption:cnl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+ /* WaDisableEarlyEOT:cnl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
+ return 0;
+}
+
+static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ /* Wa_1604370585:icl (pre-prod)
+ * Formerly known as WaPushConstantDereferenceHoldDisable
+ */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ PUSH_CONSTANT_DEREF_DISABLE);
+
+ /* WaForceEnableNonCoherent:icl
+ * This is not the same workaround as in early Gen9 platforms, where
+ * lacking this could cause system hangs, but coherency performance
+ * overhead is high and only a few compute workloads really need it
+ * (the register is whitelisted in hardware now, so UMDs can opt in
+ * for coherency if they have a good reason).
+ */
+ WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+
+ return 0;
+}
+
+int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+ int err = 0;
+
+ dev_priv->workarounds.count = 0;
+
+ if (INTEL_GEN(dev_priv) < 8)
+ err = 0;
+ else if (IS_BROADWELL(dev_priv))
+ err = bdw_ctx_workarounds_init(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ err = chv_ctx_workarounds_init(dev_priv);
+ else if (IS_SKYLAKE(dev_priv))
+ err = skl_ctx_workarounds_init(dev_priv);
+ else if (IS_BROXTON(dev_priv))
+ err = bxt_ctx_workarounds_init(dev_priv);
+ else if (IS_KABYLAKE(dev_priv))
+ err = kbl_ctx_workarounds_init(dev_priv);
+ else if (IS_GEMINILAKE(dev_priv))
+ err = glk_ctx_workarounds_init(dev_priv);
+ else if (IS_COFFEELAKE(dev_priv))
+ err = cfl_ctx_workarounds_init(dev_priv);
+ else if (IS_CANNONLAKE(dev_priv))
+ err = cnl_ctx_workarounds_init(dev_priv);
+ else if (IS_ICELAKE(dev_priv))
+ err = icl_ctx_workarounds_init(dev_priv);
+ else
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ if (err)
+ return err;
+
+ DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
+ dev_priv->workarounds.count);
+ return 0;
+}
+
+int intel_ctx_workarounds_emit(struct i915_request *rq)
+{
+ struct i915_workarounds *w = &rq->i915->workarounds;
+ u32 *cs;
+ int ret, i;
+
+ if (w->count == 0)
+ return 0;
+
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
+ for (i = 0; i < w->count; i++) {
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
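
For reference, the w->count * 2 + 2 dwords reserved above decompose into one MI_LOAD_REGISTER_IMM header, an (offset, value) pair per workaround and a trailing MI_NOOP that keeps the emitted length even. A sketch for a hypothetical two-entry list:

    MI_LOAD_REGISTER_IMM(2)          /* 1 dword header */
    <reg[0] offset>  <reg[0] value>
    <reg[1] offset>  <reg[1] value>
    MI_NOOP                          /* 2 * 2 + 2 = 6 dwords total */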
+
+static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+}
+
+static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+}
+
+static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+ _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ if (!IS_COFFEELAKE(dev_priv))
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
+
+ if (HAS_LLC(dev_priv)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ I915_WRITE(MMCD_MISC_CTRL,
+ I915_READ(MMCD_MISC_CTRL) |
+ MMCD_PCLA |
+ MMCD_HOTSPOT_EN);
+ }
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(dev_priv)) {
+ u32 val = I915_READ(GEN8_L3SQCREG1);
+
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+ I915_WRITE(GEN8_L3SQCREG1, val);
+ }
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ I915_WRITE(GEN8_L3SQCREG4,
+ I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+}
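
Note the contrast with the context workarounds earlier in the file: most GT registers touched here are neither self-masking nor part of the context image, so the helpers use a plain read-modify-write (I915_READ followed by I915_WRITE) instead of the _MASKED_* encodings, except where a register such as GEN9_CSFE_CHICKEN1_RCS does take a masked value.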
+
+static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ gen9_gt_workarounds_apply(dev_priv);
+
+ /* WaEnableGapsTsvCreditFix:skl */
+ I915_WRITE(GEN8_GARBCNTL,
+ I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+ /* WaDisableGafsUnitClkGating:skl */
+ I915_WRITE(GEN7_UCGCTL4,
+ I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:skl */
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ gen9_gt_workarounds_apply(dev_priv);
+
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+
+ /* WaInPlaceDecompressionHang:bxt */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ gen9_gt_workarounds_apply(dev_priv);
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL,
+ I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ I915_WRITE(GEN7_UCGCTL4,
+ I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:kbl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ gen9_gt_workarounds_apply(dev_priv);
+}
+
+static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ gen9_gt_workarounds_apply(dev_priv);
+
+ /* WaEnableGapsTsvCreditFix:cfl */
+ I915_WRITE(GEN8_GARBCNTL,
+ I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+ /* WaDisableGafsUnitClkGating:cfl */
+ I915_WRITE(GEN7_UCGCTL4,
+ I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:cfl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+
+ /* WaInPlaceDecompressionHang:cnl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+}
+
+static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ /* This is not a Wa. Enable for better image quality */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+
+ /* WaInPlaceDecompressionHang:icl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /* Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /* Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ I915_WRITE(GEN8_GARBCNTL,
+ (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ I915_WRITE(GEN11_GLBLINVL,
+ (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+ /* WaModifyGamTlbPartitioning:icl */
+ I915_WRITE(GEN11_GACB_PERF_CTRL,
+ (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+
+ /* Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* Wa_1405766107:icl
+ * Formerly known as WaCL2SFHalfMaxAlloc
+ */
+ I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+
+ /* Wa_220166154:icl
+ * Formerly known as WaDisCtxReload
+ */
+ I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+
+ /* Wa_1405779004:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
+ I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
+ MSCUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl */
+ I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
+ I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
+ GWUNIT_CLKGATE_DIS);
+
+ /* Wa_1604302699:icl */
+ I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
+ I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
+ GEN11_I2M_WRITE_DISABLE);
+
+ /* Wa_1406838659:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
+ I915_READ(INF_UNIT_LEVEL_CLKGATE) |
+ CGPSF_CLKGATE_DIS);
+
+ /* WaForwardProgressSoftReset:icl */
+ I915_WRITE(GEN10_SCRATCH_LNCF2,
+ I915_READ(GEN10_SCRATCH_LNCF2) |
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+}
+
+void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) < 8)
+ return;
+ else if (IS_BROADWELL(dev_priv))
+ bdw_gt_workarounds_apply(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_gt_workarounds_apply(dev_priv);
+ else if (IS_SKYLAKE(dev_priv))
+ skl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROXTON(dev_priv))
+ bxt_gt_workarounds_apply(dev_priv);
+ else if (IS_KABYLAKE(dev_priv))
+ kbl_gt_workarounds_apply(dev_priv);
+ else if (IS_GEMINILAKE(dev_priv))
+ glk_gt_workarounds_apply(dev_priv);
+ else if (IS_COFFEELAKE(dev_priv))
+ cfl_gt_workarounds_apply(dev_priv);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_gt_workarounds_apply(dev_priv);
+ else if (IS_ICELAKE(dev_priv))
+ icl_gt_workarounds_apply(dev_priv);
+ else
+ MISSING_CASE(INTEL_GEN(dev_priv));
+}
+
+struct whitelist {
+ i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
+ unsigned int count;
+ u32 nopid;
+};
+
+static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+{
+ if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ w->reg[w->count++] = reg;
+}
+
+static void bdw_whitelist_build(struct whitelist *w)
+{
+}
+
+static void chv_whitelist_build(struct whitelist *w)
+{
+}
+
+static void gen9_whitelist_build(struct whitelist *w)
+{
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+ whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+ whitelist_reg(w, GEN8_CS_CHICKEN1);
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
+ whitelist_reg(w, GEN8_HDC_CHICKEN1);
+}
+
+static void skl_whitelist_build(struct whitelist *w)
+{
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+}
+
+static void bxt_whitelist_build(struct whitelist *w)
+{
+ gen9_whitelist_build(w);
+}
+
+static void kbl_whitelist_build(struct whitelist *w)
+{
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+}
+
+static void glk_whitelist_build(struct whitelist *w)
+{
+ gen9_whitelist_build(w);
+
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+}
+
+static void cfl_whitelist_build(struct whitelist *w)
+{
+ gen9_whitelist_build(w);
+}
+
+static void cnl_whitelist_build(struct whitelist *w)
+{
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ whitelist_reg(w, GEN8_CS_CHICKEN1);
+}
+
+static void icl_whitelist_build(struct whitelist *w)
+{
+}
+
+static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
+ struct whitelist *w)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ GEM_BUG_ON(engine->id != RCS);
+
+ w->count = 0;
+ w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+
+ if (INTEL_GEN(i915) < 8)
+ return NULL;
+ else if (IS_BROADWELL(i915))
+ bdw_whitelist_build(w);
+ else if (IS_CHERRYVIEW(i915))
+ chv_whitelist_build(w);
+ else if (IS_SKYLAKE(i915))
+ skl_whitelist_build(w);
+ else if (IS_BROXTON(i915))
+ bxt_whitelist_build(w);
+ else if (IS_KABYLAKE(i915))
+ kbl_whitelist_build(w);
+ else if (IS_GEMINILAKE(i915))
+ glk_whitelist_build(w);
+ else if (IS_COFFEELAKE(i915))
+ cfl_whitelist_build(w);
+ else if (IS_CANNONLAKE(i915))
+ cnl_whitelist_build(w);
+ else if (IS_ICELAKE(i915))
+ icl_whitelist_build(w);
+ else
+ MISSING_CASE(INTEL_GEN(i915));
+
+ return w;
+}
+
+static void whitelist_apply(struct intel_engine_cs *engine,
+ const struct whitelist *w)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ unsigned int i;
+
+ if (!w)
+ return;
+
+ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+ for (i = 0; i < w->count; i++)
+ I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(w->reg[i]));
+
+ /* And clear the rest just in case of garbage */
+ for (; i < RING_MAX_NONPRIV_SLOTS; i++)
+ I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+
+ intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+}
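
Taken together, the two loops above amount to the following single pass (a condensed sketch, not a drop-in replacement):

    for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
    	I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
    		      i < w->count ? i915_mmio_reg_offset(w->reg[i])
    				   : w->nopid); /* park unused slots on NOPID */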
+
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+ struct whitelist w;
+
+ whitelist_apply(engine, whitelist_build(engine, &w));
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_workarounds.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
new file mode 100644
index 000000000000..b11d0623e626
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _I915_WORKAROUNDS_H_
+#define _I915_WORKAROUNDS_H_
+
+int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
+int intel_ctx_workarounds_emit(struct i915_request *rq);
+
+void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 05bbef363fff..91c72911be3c 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1091,7 +1091,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_close(vma);
+ i915_vma_destroy(vma);
return err;
}
@@ -1757,6 +1757,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
goto out_unlock;
}
+ if (ctx->ppgtt)
+ ctx->ppgtt->base.scrub_64K = true;
+
err = i915_subtests(tests, ctx);
out_unlock:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7ecaed50d0b9..ddb03f009232 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -23,6 +23,7 @@
*/
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
#include "mock_drm.h"
#include "huge_gem_object.h"
@@ -411,6 +412,8 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 9c76f0305b6a..a00e2bd08bce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,6 +11,7 @@
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
+selftest(workarounds, intel_workarounds_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
@@ -20,4 +21,5 @@ selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
+selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 9a48aa441743..d16d74178e9d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -14,6 +14,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
+selftest(engine, intel_engine_cs_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
selftest(timelines, i915_gem_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 3000e6a7d82d..19f1c6a5c8fb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017-2018 Intel Corporation
*/
#include "../i915_selftest.h"
@@ -35,21 +17,21 @@ struct __igt_sync {
bool set;
};
-static int __igt_sync(struct intel_timeline *tl,
+static int __igt_sync(struct i915_timeline *tl,
u64 ctx,
const struct __igt_sync *p,
const char *name)
{
int ret;
- if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
name, p->name, ctx, p->seqno, yesno(p->expected));
return -EINVAL;
}
if (p->set) {
- ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+ ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
if (ret)
return ret;
}
@@ -77,37 +59,31 @@ static int igt_sync(void *arg)
{ "unwrap", UINT_MAX, true, false },
{},
}, *p;
- struct intel_timeline *tl;
+ struct i915_timeline tl;
int order, offset;
int ret = -ENODEV;
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
-
+ mock_timeline_init(&tl, 0);
for (p = pass; p->name; p++) {
for (order = 1; order < 64; order++) {
for (offset = -1; offset <= (order > 1); offset++) {
u64 ctx = BIT_ULL(order) + offset;
- ret = __igt_sync(tl, ctx, p, "1");
+ ret = __igt_sync(&tl, ctx, p, "1");
if (ret)
goto out;
}
}
}
- mock_timeline_destroy(tl);
-
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
+ mock_timeline_fini(&tl);
+ mock_timeline_init(&tl, 0);
for (order = 1; order < 64; order++) {
for (offset = -1; offset <= (order > 1); offset++) {
u64 ctx = BIT_ULL(order) + offset;
for (p = pass; p->name; p++) {
- ret = __igt_sync(tl, ctx, p, "2");
+ ret = __igt_sync(&tl, ctx, p, "2");
if (ret)
goto out;
}
@@ -115,7 +91,7 @@ static int igt_sync(void *arg)
}
out:
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
return ret;
}
@@ -127,15 +103,13 @@ static unsigned int random_engine(struct rnd_state *rnd)
static int bench_sync(void *arg)
{
struct rnd_state prng;
- struct intel_timeline *tl;
+ struct i915_timeline tl;
unsigned long end_time, count;
u64 prng32_1M;
ktime_t kt;
int order, last_order;
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
+ mock_timeline_init(&tl, 0);
/* Lookups from cache are very fast and so the random number generation
* and the loop itself becomes a significant factor in the per-iteration
@@ -167,7 +141,7 @@ static int bench_sync(void *arg)
do {
u64 id = i915_prandom_u64_state(&prng);
- __intel_timeline_sync_set(tl, id, 0);
+ __i915_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
@@ -182,8 +156,8 @@ static int bench_sync(void *arg)
while (end_time--) {
u64 id = i915_prandom_u64_state(&prng);
- if (!__intel_timeline_sync_is_later(tl, id, 0)) {
- mock_timeline_destroy(tl);
+ if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+ mock_timeline_fini(&tl);
pr_err("Lookup of %llu failed\n", id);
return -EINVAL;
}
@@ -193,19 +167,17 @@ static int bench_sync(void *arg)
pr_info("%s: %lu random lookups, %lluns/lookup\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
cond_resched();
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
+ mock_timeline_init(&tl, 0);
/* Benchmark setting the first N (in order) contexts */
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
- __intel_timeline_sync_set(tl, count++, 0);
+ __i915_timeline_sync_set(&tl, count++, 0);
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -215,9 +187,9 @@ static int bench_sync(void *arg)
end_time = count;
kt = ktime_get();
while (end_time--) {
- if (!__intel_timeline_sync_is_later(tl, end_time, 0)) {
+ if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
pr_err("Lookup of %lu failed\n", end_time);
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
return -EINVAL;
}
}
@@ -225,12 +197,10 @@ static int bench_sync(void *arg)
pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
cond_resched();
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
+ mock_timeline_init(&tl, 0);
/* Benchmark searching for a random context id and maybe changing it */
prandom_seed_state(&prng, i915_selftest.random_seed);
@@ -241,8 +211,8 @@ static int bench_sync(void *arg)
u32 id = random_engine(&prng);
u32 seqno = prandom_u32_state(&prng);
- if (!__intel_timeline_sync_is_later(tl, id, seqno))
- __intel_timeline_sync_set(tl, id, seqno);
+ if (!__i915_timeline_sync_is_later(&tl, id, seqno))
+ __i915_timeline_sync_set(&tl, id, seqno);
count++;
} while (!time_after(jiffies, end_time));
@@ -250,7 +220,7 @@ static int bench_sync(void *arg)
kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
cond_resched();
/* Benchmark searching for a known context id and changing the seqno */
@@ -258,9 +228,7 @@ static int bench_sync(void *arg)
({ int tmp = last_order; last_order = order; order += tmp; })) {
unsigned int mask = BIT(order) - 1;
- tl = mock_timeline(0);
- if (!tl)
- return -ENOMEM;
+ mock_timeline_init(&tl, 0);
count = 0;
kt = ktime_get();
@@ -272,8 +240,8 @@ static int bench_sync(void *arg)
*/
u64 id = (u64)(count & mask) << order;
- __intel_timeline_sync_is_later(tl, id, 0);
- __intel_timeline_sync_set(tl, id, 0);
+ __i915_timeline_sync_is_later(&tl, id, 0);
+ __i915_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
@@ -281,7 +249,7 @@ static int bench_sync(void *arg)
pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
__func__, count, order,
(long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_destroy(tl);
+ mock_timeline_fini(&tl);
cond_resched();
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index eb89e301b602..e90f97236e50 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -81,7 +81,7 @@ checked_vma_instance(struct drm_i915_gem_object *obj,
}
if (i915_vma_compare(vma, vm, view)) {
- pr_err("i915_vma_compare failed with create parmaters!\n");
+ pr_err("i915_vma_compare failed with create parameters!\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
new file mode 100644
index 000000000000..0d06f559243f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -0,0 +1,70 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_drv.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
+struct wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const void *symbol;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+ struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
+
+ GEM_TRACE("%pS timed out.\n", w->symbol);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const void *symbol)
+{
+ w->i915 = i915;
+ w->symbol = symbol;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
+ (W)->i915; \
+ __fini_wedge((W)))
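
The for(;;) shape of the macro gives it scoped, run-once semantics: __init_wedge() arms the timer, the body executes while (W)->i915 is still set, and __fini_wedge() clears it in the increment expression so the loop terminates after one pass. A usage sketch (the HZ / 2 timeout is hypothetical):

    struct wedge_me w;

    wedge_on_timeout(&w, i915, HZ / 2)
    	i915_gem_wait_for_idle(i915, 0); /* GPU is wedged if this stalls */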
+
+int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+{
+ struct wedge_me w;
+
+ cond_resched();
+
+ if (flags & I915_WAIT_LOCKED &&
+ i915_gem_switch_to_kernel_context(i915)) {
+ pr_err("Failed to switch back to kernel context; declaring wedged\n");
+ i915_gem_set_wedged(i915);
+ }
+
+ wedge_on_timeout(&w, i915, HZ)
+ i915_gem_wait_for_idle(i915, flags);
+
+ return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
new file mode 100644
index 000000000000..63e009927c43
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_FLUSH_TEST_H
+#define IGT_FLUSH_TEST_H
+
+struct drm_i915_private;
+
+int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+
+#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 46580026c7fc..d6926e7820e5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -412,10 +412,11 @@ static int igt_wakeup(void *arg)
* that they are ready for the next test. We wait until all
* threads are complete and waiting for us (i.e. not a seqno).
*/
- err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
- if (err) {
+ if (!wait_var_event_timeout(&done,
+ !atomic_read(&done), 10 * HZ)) {
pr_err("Timed out waiting for %d remaining waiters\n",
atomic_read(&done));
+ err = -ETIMEDOUT;
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
new file mode 100644
index 000000000000..cfaa6b296835
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+static int intel_mmio_bases_check(void *arg)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+ const struct engine_info *info = &intel_engines[i];
+ char name[INTEL_ENGINE_CS_MAX_NAME];
+ u8 prev = U8_MAX;
+
+ __sprint_engine_name(name, info);
+
+ for (j = 0; j < MAX_MMIO_BASES; j++) {
+ u8 gen = info->mmio_bases[j].gen;
+ u32 base = info->mmio_bases[j].base;
+
+ if (gen >= prev) {
+ pr_err("%s: %s: mmio base for gen %x "
+ "is before the one for gen %x\n",
+ __func__, name, prev, gen);
+ return -EINVAL;
+ }
+
+ if (gen == 0)
+ break;
+
+ if (!base) {
+ pr_err("%s: %s: invalid mmio base (%x) "
+ "for gen %x at entry %u\n",
+ __func__, name, base, gen, j);
+ return -EINVAL;
+ }
+
+ prev = gen;
+ }
+
+ pr_info("%s: min gen supported for %s = %d\n",
+ __func__, name, prev);
+ }
+
+ return 0;
+}
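
For context, a hypothetical engine-table entry of the shape this selftest validates; mmio_bases[] must be sorted by gen in descending order and terminated by a gen == 0 sentinel (the offsets below are invented):

    .mmio_bases = {
    	{ .gen = 11, .base = 0x1c0000 },
    	{ .gen =  6, .base = 0x012000 },
    	{ /* gen == 0: sentinel */ },
    },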
+
+int intel_engine_cs_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_mmio_bases_check),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index df7898c8edcb..438e0b045a2c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -25,10 +25,14 @@
#include <linux/kthread.h>
#include "../i915_selftest.h"
+#include "i915_random.h"
+#include "igt_flush_test.h"
#include "mock_context.h"
#include "mock_drm.h"
+#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
+
struct hang {
struct drm_i915_private *i915;
struct drm_i915_gem_object *hws;
@@ -250,58 +254,6 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%pS timed out, cancelling all further testing.\n",
- w->symbol);
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const void *symbol)
-{
- w->i915 = i915;
- w->symbol = symbol;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
- (W)->i915; \
- __fini_wedge((W)))
-
-static noinline int
-flush_test(struct drm_i915_private *i915, unsigned int flags)
-{
- struct wedge_me w;
-
- cond_resched();
-
- wedge_on_timeout(&w, i915, HZ)
- i915_gem_wait_for_idle(i915, flags);
-
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
-}
-
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
@@ -315,10 +267,10 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- flush_test(h->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->i915, I915_WAIT_LOCKED);
}
-static bool wait_for_hang(struct hang *h, struct i915_request *rq)
+static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
rq->fence.seqno),
@@ -433,7 +385,7 @@ static int igt_global_reset(void *arg)
mutex_lock(&i915->drm.struct_mutex);
reset_count = i915_reset_count(&i915->gpu_error);
- i915_reset(i915, I915_RESET_QUIET);
+ i915_reset(i915, ALL_ENGINES, NULL);
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
@@ -450,6 +402,11 @@ static int igt_global_reset(void *arg)
return err;
}
+static bool wait_for_idle(struct intel_engine_cs *engine)
+{
+ return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
+}
+
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
struct intel_engine_cs *engine;
@@ -477,12 +434,21 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
if (active && !intel_engine_can_store_dword(engine))
continue;
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
+ u32 seqno = intel_engine_get_seqno(engine);
+
if (active) {
struct i915_request *rq;
@@ -498,7 +464,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
__i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
- if (!wait_for_hang(&h, rq)) {
+ if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
@@ -511,14 +477,12 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
+ GEM_BUG_ON(!rq->global_seqno);
+ seqno = rq->global_seqno - 1;
i915_request_put(rq);
}
- engine->hangcheck.stalled = true;
- engine->hangcheck.seqno =
- intel_engine_get_seqno(engine);
-
- err = i915_reset_engine(engine, I915_RESET_QUIET);
+ err = i915_reset_engine(engine, NULL);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
@@ -539,14 +503,25 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- engine->hangcheck.stalled = false;
+ if (!wait_for_idle(engine)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
if (err)
break;
- err = flush_test(i915, 0);
+ err = igt_flush_test(i915, 0);
if (err)
break;
}
@@ -573,11 +548,25 @@ static int igt_reset_active_engine(void *arg)
return __igt_reset_engine(arg, true);
}
+struct active_engine {
+ struct task_struct *task;
+ struct intel_engine_cs *engine;
+ unsigned long resets;
+ unsigned int flags;
+};
+
+#define TEST_ACTIVE BIT(0)
+#define TEST_OTHERS BIT(1)
+#define TEST_SELF BIT(2)
+#define TEST_PRIORITY BIT(3)
+
static int active_engine(void *data)
{
- struct intel_engine_cs *engine = data;
- struct i915_request *rq[2] = {};
- struct i915_gem_context *ctx[2];
+ I915_RND_STATE(prng);
+ struct active_engine *arg = data;
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq[8] = {};
+ struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
struct drm_file *file;
unsigned long count = 0;
int err = 0;
@@ -586,25 +575,20 @@ static int active_engine(void *data)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&engine->i915->drm.struct_mutex);
- ctx[0] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
- if (IS_ERR(ctx[0])) {
- err = PTR_ERR(ctx[0]);
- goto err_file;
- }
-
- mutex_lock(&engine->i915->drm.struct_mutex);
- ctx[1] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
- if (IS_ERR(ctx[1])) {
- err = PTR_ERR(ctx[1]);
- i915_gem_context_put(ctx[0]);
- goto err_file;
+ for (count = 0; count < ARRAY_SIZE(ctx); count++) {
+ mutex_lock(&engine->i915->drm.struct_mutex);
+ ctx[count] = live_context(engine->i915, file);
+ mutex_unlock(&engine->i915->drm.struct_mutex);
+ if (IS_ERR(ctx[count])) {
+ err = PTR_ERR(ctx[count]);
+ while (--count)
+ i915_gem_context_put(ctx[count]);
+ goto err_file;
+ }
}
while (!kthread_should_stop()) {
- unsigned int idx = count++ & 1;
+ unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
@@ -616,14 +600,28 @@ static int active_engine(void *data)
break;
}
+ if (arg->flags & TEST_PRIORITY)
+ ctx[idx]->sched.priority =
+ i915_prandom_u32_max_state(512, &prng);
+
rq[idx] = i915_request_get(new);
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
if (old) {
- i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+ if (i915_request_wait(old, 0, HZ) < 0) {
+ GEM_TRACE("%s timed out.\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(engine->i915);
+ i915_request_put(old);
+ err = -EIO;
+ break;
+ }
i915_request_put(old);
}
+
+ cond_resched();
}
for (count = 0; count < ARRAY_SIZE(rq); count++)
@@ -634,8 +632,9 @@ err_file:
return err;
}
-static int __igt_reset_engine_others(struct drm_i915_private *i915,
- bool active)
+static int __igt_reset_engines(struct drm_i915_private *i915,
+ const char *test_name,
+ unsigned int flags)
{
struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
@@ -649,50 +648,68 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
if (!intel_has_reset_engine(i915))
return 0;
- if (active) {
+ if (flags & TEST_ACTIVE) {
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
return err;
+
+ if (flags & TEST_PRIORITY)
+ h.ctx->sched.priority = 1024;
}
for_each_engine(engine, i915, id) {
- struct task_struct *threads[I915_NUM_ENGINES] = {};
- unsigned long resets[I915_NUM_ENGINES];
+ struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long global = i915_reset_count(&i915->gpu_error);
- unsigned long count = 0;
+ unsigned long count = 0, reported;
IGT_TIMEOUT(end_time);
- if (active && !intel_engine_can_store_dword(engine))
+ if (flags & TEST_ACTIVE &&
+ !intel_engine_can_store_dword(engine))
continue;
+ if (!wait_for_idle(engine)) {
+ pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
+ engine->name, test_name);
+ err = -EIO;
+ break;
+ }
+
memset(threads, 0, sizeof(threads));
for_each_engine(other, i915, tmp) {
struct task_struct *tsk;
- resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
- other);
+ threads[tmp].resets =
+ i915_reset_engine_count(&i915->gpu_error,
+ other);
+
+ if (!(flags & TEST_OTHERS))
+ continue;
- if (other == engine)
+ if (other == engine && !(flags & TEST_SELF))
continue;
- tsk = kthread_run(active_engine, other,
+ threads[tmp].engine = other;
+ threads[tmp].flags = flags;
+
+ tsk = kthread_run(active_engine, &threads[tmp],
"igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
goto unwind;
}
- threads[tmp] = tsk;
+ threads[tmp].task = tsk;
get_task_struct(tsk);
}
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
- if (active) {
- struct i915_request *rq;
+ u32 seqno = intel_engine_get_seqno(engine);
+ struct i915_request *rq = NULL;
+ if (flags & TEST_ACTIVE) {
mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@@ -705,7 +722,7 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
__i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
- if (!wait_for_hang(&h, rq)) {
+ if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
@@ -718,33 +735,48 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
break;
}
- i915_request_put(rq);
+ GEM_BUG_ON(!rq->global_seqno);
+ seqno = rq->global_seqno - 1;
}
- engine->hangcheck.stalled = true;
- engine->hangcheck.seqno =
- intel_engine_get_seqno(engine);
-
- err = i915_reset_engine(engine, I915_RESET_QUIET);
+ err = i915_reset_engine(engine, NULL);
if (err) {
- pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
- engine->name, active ? "active" : "idle", err);
+ pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
+ engine->name, test_name, err);
break;
}
- engine->hangcheck.stalled = false;
count++;
+
+ if (rq) {
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+ }
+
+ if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to idle after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
- engine->name, active ? "active" : "idle", count);
-
- if (i915_reset_engine_count(&i915->gpu_error, engine) -
- resets[engine->id] != (active ? count : 0)) {
- pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
- engine->name, active ? "active" : "idle", count,
- i915_reset_engine_count(&i915->gpu_error,
- engine) - resets[engine->id]);
+ engine->name, test_name, count);
+
+ reported = i915_reset_engine_count(&i915->gpu_error, engine);
+ reported -= threads[engine->id].resets;
+ if (reported != (flags & TEST_ACTIVE ? count : 0)) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n",
+ engine->name, test_name, count, reported,
+ (flags & TEST_ACTIVE ? count : 0));
if (!err)
err = -EINVAL;
}
@@ -753,24 +785,26 @@ unwind:
for_each_engine(other, i915, tmp) {
int ret;
- if (!threads[tmp])
+ if (!threads[tmp].task)
continue;
- ret = kthread_stop(threads[tmp]);
+ ret = kthread_stop(threads[tmp].task);
if (ret) {
pr_err("kthread for other engine %s failed, err=%d\n",
other->name, ret);
if (!err)
err = ret;
}
- put_task_struct(threads[tmp]);
+ put_task_struct(threads[tmp].task);
- if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
- other)) {
+ if (other != engine &&
+ threads[tmp].resets !=
+ i915_reset_engine_count(&i915->gpu_error, other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
other->name,
i915_reset_engine_count(&i915->gpu_error,
- other) - resets[tmp]);
+ other) -
+ threads[tmp].resets);
if (!err)
err = -EINVAL;
}
@@ -786,7 +820,7 @@ unwind:
if (err)
break;
- err = flush_test(i915, 0);
+ err = igt_flush_test(i915, 0);
if (err)
break;
}
@@ -794,7 +828,7 @@ unwind:
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
- if (active) {
+ if (flags & TEST_ACTIVE) {
mutex_lock(&i915->drm.struct_mutex);
hang_fini(&h);
mutex_unlock(&i915->drm.struct_mutex);
@@ -803,27 +837,56 @@ unwind:
return err;
}
-static int igt_reset_idle_engine_others(void *arg)
+static int igt_reset_engines(void *arg)
{
- return __igt_reset_engine_others(arg, false);
-}
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "idle", 0 },
+ { "active", TEST_ACTIVE },
+ { "others-idle", TEST_OTHERS },
+ { "others-active", TEST_OTHERS | TEST_ACTIVE },
+ {
+ "others-priority",
+ TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
+ },
+ {
+ "self-priority",
+ TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
+ },
+ { }
+ };
+ struct drm_i915_private *i915 = arg;
+ typeof(*phases) *p;
+ int err;
-static int igt_reset_active_engine_others(void *arg)
-{
- return __igt_reset_engine_others(arg, true);
+ for (p = phases; p->name; p++) {
+ if (p->flags & TEST_PRIORITY) {
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ continue;
+ }
+
+ err = __igt_reset_engines(arg, p->name, p->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
-static u32 fake_hangcheck(struct i915_request *rq)
+static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
{
- u32 reset_count;
+ struct i915_gpu_error *error = &rq->i915->gpu_error;
+ u32 reset_count = i915_reset_count(error);
- rq->engine->hangcheck.stalled = true;
- rq->engine->hangcheck.seqno = intel_engine_get_seqno(rq->engine);
+ error->stalled_mask = mask;
- reset_count = i915_reset_count(&rq->i915->gpu_error);
+ /* set_bit() must be after we have setup the backchannel (mask) */
+ smp_mb__before_atomic();
+ set_bit(I915_RESET_HANDOFF, &error->flags);
- set_bit(I915_RESET_HANDOFF, &rq->i915->gpu_error.flags);
- wake_up_all(&rq->i915->gpu_error.wait_queue);
+ wake_up_all(&error->wait_queue);
return reset_count;
}
@@ -858,21 +921,20 @@ static int igt_wait_reset(void *arg)
i915_request_get(rq);
__i915_request_add(rq, true);
- if (!wait_for_hang(&h, rq)) {
+ if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_reset(i915, 0);
i915_gem_set_wedged(i915);
err = -EIO;
goto out_rq;
}
- reset_count = fake_hangcheck(rq);
+ reset_count = fake_hangcheck(rq, ALL_ENGINES);
timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
if (timeout < 0) {
@@ -903,6 +965,23 @@ unlock:
return err;
}
+static int wait_for_others(struct drm_i915_private *i915,
+ struct intel_engine_cs *exclude)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (engine == exclude)
+ continue;
+
+ if (!wait_for_idle(engine))
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int igt_reset_queue(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -951,27 +1030,49 @@ static int igt_reset_queue(void *arg)
i915_request_get(rq);
__i915_request_add(rq, true);
- if (!wait_for_hang(&h, prev)) {
+ /*
+ * XXX We don't handle resetting the kernel context
+ * very well. If we trigger a device reset twice in
+ * quick succession while the kernel context is
+ * executing, we may end up skipping the breadcrumb.
+ * This is really only a problem for the selftest as
+ * normally there is a large interlude between resets
+ * (hangcheck), or we focus on resetting just one
+ * engine and so avoid repeatedly resetting innocents.
+ */
+ err = wait_for_others(i915, engine);
+ if (err) {
+ pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
+ __func__, engine->name);
+ i915_request_put(rq);
+ i915_request_put(prev);
+
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ goto fini;
+ }
+
+ if (!wait_until_running(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
- __func__, prev->fence.seqno, hws_seqno(&h, prev));
- intel_engine_dump(prev->engine, &p,
- "%s\n", prev->engine->name);
+ pr_err("%s(%s): Failed to start request %x, at %x\n",
+ __func__, engine->name,
+ prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
i915_request_put(rq);
i915_request_put(prev);
- i915_reset(i915, 0);
i915_gem_set_wedged(i915);
err = -EIO;
goto fini;
}
- reset_count = fake_hangcheck(prev);
+ reset_count = fake_hangcheck(prev, ENGINE_MASK(id));
- i915_reset(i915, I915_RESET_QUIET);
+ i915_reset(i915, ENGINE_MASK(id), NULL);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
&i915->gpu_error.flags));
@@ -1013,7 +1114,7 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
- err = flush_test(i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
if (err)
break;
}
@@ -1044,7 +1145,7 @@ static int igt_handle_error(void *arg)
if (!intel_has_reset_engine(i915))
return 0;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
mutex_lock(&i915->drm.struct_mutex);
@@ -1062,14 +1163,13 @@ static int igt_handle_error(void *arg)
i915_request_get(rq);
__i915_request_add(rq, true);
- if (!wait_for_hang(&h, rq)) {
+ if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_reset(i915, 0);
i915_gem_set_wedged(i915);
err = -EIO;
@@ -1081,10 +1181,7 @@ static int igt_handle_error(void *arg)
/* Temporarily disable error capture */
error = xchg(&i915->gpu_error.first_error, (void *)-1);
- engine->hangcheck.stalled = true;
- engine->hangcheck.seqno = intel_engine_get_seqno(engine);
-
- i915_handle_error(i915, intel_engine_flag(engine), "%s", __func__);
+ i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
xchg(&i915->gpu_error.first_error, error);
@@ -1112,8 +1209,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
- SUBTEST(igt_reset_idle_engine_others),
- SUBTEST(igt_reset_active_engine_others),
+ SUBTEST(igt_reset_engines),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
@@ -1129,6 +1225,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
err = i915_subtests(tests, i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(i915);
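
The reworked fake_hangcheck() above turns the reset handoff into a small
backchannel: error->stalled_mask is published first, and only then is
I915_RESET_HANDOFF raised, with smp_mb__before_atomic() ordering the two
stores. A minimal sketch of the consuming side such ordering pairs with
(handle_stall() is a hypothetical helper; only stalled_mask, flags and
wait_queue come from the hunk):

	if (test_bit(I915_RESET_HANDOFF, &error->flags)) {
		/* pairs with smp_mb__before_atomic() in fake_hangcheck() */
		smp_rmb();
		handle_stall(READ_ONCE(error->stalled_mask));
	}
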
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
new file mode 100644
index 000000000000..1b8a07125150
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -0,0 +1,459 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
+#include "mock_context.h"
+
+struct spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ i915_vma_move_to_active(vma, rq, 0);
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ i915_vma_move_to_active(hws, rq, 0);
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
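+
+ /*
+ * The store/arbitration/branch sequence above jumps back to its own
+ * start, so the batch spins on the GPU until spinner_end() rewrites the
+ * first dword to MI_BATCH_BUFFER_END.
+ */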
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
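+/*
+ * arbitration_command decides whether the spin loop contains a
+ * preemption point: MI_ARB_CHECK lets a higher-priority request take
+ * over the engine mid-spin, while MI_NOOP leaves the batch
+ * unpreemptible.
+ */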
+static struct i915_request *
+spinner_create_request(struct spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ __i915_request_add(rq, false);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+static void spinner_end(struct spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+static void spinner_fini(struct spinner *spin)
+{
+ spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
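+/*
+ * wait_for_spinner() waits up to 10ms for the request to reach hardware
+ * (a global_seqno being assigned), then busy-waits for 10us and finally
+ * sleeps for up to a second waiting for the spinner's seqno write to
+ * land in the HWS page.
+ */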
+static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
+{
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ struct spinner spin;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin, i915))
+ goto err_unlock;
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ goto err_spin;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx;
+ }
+
+ spinner_end(&spin);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx;
+ }
+ }
+
+ err = 0;
+err_ctx:
+ kernel_context_close(ctx);
+err_spin:
+ spinner_fini(&spin);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin_hi, i915))
+ goto err_unlock;
+
+ if (spinner_init(&spin_lo, i915))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ spinner_fini(&spin_lo);
+err_spin_hi:
+ spinner_fini(&spin_hi);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin_hi, i915))
+ goto err_unlock;
+
+ if (spinner_init(&spin_lo, i915))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
+ attr.priority = I915_PRIORITY_MAX;
+ engine->schedule(rq, &attr);
+
+ if (!wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ spinner_fini(&spin_lo);
+err_spin_hi:
+ spinner_fini(&spin_hi);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ };
+
+ if (!HAS_EXECLISTS(i915))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index f76f2597df5c..47bc5b2ddb56 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -137,7 +137,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+ valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
GFP_KERNEL);
if (!valid)
return -ENOMEM;
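
The kzalloc() to kcalloc() conversion above is more than style: kcalloc()
fails with NULL if n * size would overflow, whereas the open-coded
multiplication handed to kzalloc() wraps silently before the allocator
ever sees it. Schematically:

	/* unchecked: the multiply can wrap before kzalloc() sees it */
	valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), GFP_KERNEL);

	/* checked: returns NULL if the product overflows */
	valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid), GFP_KERNEL);
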
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
new file mode 100644
index 000000000000..17444a3abbb9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -0,0 +1,291 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+
+static struct drm_i915_gem_object *
+read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *result;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ const u32 base = engine->mmio_base;
+ u32 srm, *cs;
+ int err;
+ int i;
+
+ result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(result))
+ return result;
+
+ i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+
+ cs = i915_gem_object_pin_map(result, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+ memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_unpin_map(result);
+
+ vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_obj;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_pin;
+ }
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
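+ /* gen8+ SRM carries a 64-bit address, adding one dword of length */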
+
+ cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ reservation_object_lock(vma->resv, NULL);
+ reservation_object_add_excl_fence(vma->resv, &rq->fence);
+ reservation_object_unlock(vma->resv);
+
+ i915_gem_object_get(result);
+ i915_gem_object_set_active_reference(result);
+
+ __i915_request_add(rq, true);
+ i915_vma_unpin(vma);
+
+ return result;
+
+err_req:
+ i915_request_add(rq);
+err_pin:
+ i915_vma_unpin(vma);
+err_obj:
+ i915_gem_object_put(result);
+ return ERR_PTR(err);
+}
+
+static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+{
+ return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+}
+
+static void print_results(const struct whitelist *w, const u32 *results)
+{
+ unsigned int i;
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ u32 expected = get_whitelist_reg(w, i);
+ u32 actual = results[i];
+
+ pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
+ i, expected, actual);
+ }
+}
+
+static int check_whitelist(const struct whitelist *w,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *results;
+ u32 *vaddr;
+ int err;
+ int i;
+
+ results = read_nonprivs(ctx, engine);
+ if (IS_ERR(results))
+ return PTR_ERR(results);
+
+ err = i915_gem_object_set_to_cpu_domain(results, false);
+ if (err)
+ goto out_put;
+
+ vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ u32 expected = get_whitelist_reg(w, i);
+ u32 actual = vaddr[i];
+
+ if (expected != actual) {
+ print_results(w, vaddr);
+ pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
+ i, expected, actual);
+
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(results);
+out_put:
+ i915_gem_object_put(results);
+ return err;
+}
+
+static int do_device_reset(struct intel_engine_cs *engine)
+{
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ return 0;
+}
+
+static int do_engine_reset(struct intel_engine_cs *engine)
+{
+ return i915_reset_engine(engine, NULL);
+}
+
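+/*
+ * Running a nop request on a throwaway kernel context moves the engine
+ * off the test context before it is reset; presumably this prevents the
+ * later whitelist readback from passing merely because the original
+ * context image was never unloaded.
+ */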
+static int switch_to_scratch_context(struct intel_engine_cs *engine)
+{
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+
+ ctx = kernel_context(engine->i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ rq = i915_request_alloc(engine, ctx);
+ kernel_context_close(ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int check_whitelist_across_reset(struct intel_engine_cs *engine,
+ int (*reset)(struct intel_engine_cs *),
+ const struct whitelist *w,
+ const char *name)
+{
+ struct i915_gem_context *ctx;
+ int err;
+
+ ctx = kernel_context(engine->i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ err = check_whitelist(w, ctx, engine);
+ if (err) {
+ pr_err("Invalid whitelist *before* %s reset!\n", name);
+ goto out;
+ }
+
+ err = switch_to_scratch_context(engine);
+ if (err)
+ goto out;
+
+ err = reset(engine);
+ if (err) {
+ pr_err("%s reset failed\n", name);
+ goto out;
+ }
+
+ err = check_whitelist(w, ctx, engine);
+ if (err) {
+ pr_err("Whitelist not preserved in context across %s reset!\n",
+ name);
+ goto out;
+ }
+
+ kernel_context_close(ctx);
+
+ ctx = kernel_context(engine->i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ err = check_whitelist(w, ctx, engine);
+ if (err) {
+ pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
+ name);
+ goto out;
+ }
+
+out:
+ kernel_context_close(ctx);
+ return err;
+}
+
+static int live_reset_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct whitelist w;
+ int err = 0;
+
+ /* If we reset the GPU, we should not lose the RING_NONPRIV */
+
+ if (!engine)
+ return 0;
+
+ if (!whitelist_build(engine, &w))
+ return 0;
+
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
+
+ set_bit(I915_RESET_BACKOFF, &error->flags);
+ set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+
+ if (intel_has_reset_engine(i915)) {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset, &w,
+ "engine");
+ if (err)
+ goto out;
+ }
+
+ if (intel_has_gpu_reset(i915)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset, &w,
+ "device");
+ if (err)
+ goto out;
+ }
+
+out:
+ clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ clear_bit(I915_RESET_BACKOFF, &error->flags);
+ return err;
+}
+
+int intel_workarounds_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_reset_whitelist),
+ };
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 78a89efa1119..26bf29d97007 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -25,6 +25,11 @@
#include "mock_engine.h"
#include "mock_request.h"
+struct mock_ring {
+ struct intel_ring base;
+ struct i915_timeline timeline;
+};
+
static struct mock_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -71,14 +76,21 @@ static struct intel_ring *
mock_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- i915_gem_context_get(ctx);
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ if (!ce->pin_count++)
+ i915_gem_context_get(ctx);
+
return engine->buffer;
}
static void mock_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- i915_gem_context_put(ctx);
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ if (!--ce->pin_count)
+ i915_gem_context_put(ctx);
}
static int mock_request_alloc(struct i915_request *request)
@@ -125,7 +137,7 @@ static void mock_submit_request(struct i915_request *request)
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
- struct intel_ring *ring;
+ struct mock_ring *ring;
BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
@@ -133,14 +145,25 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- ring->size = sz;
- ring->effective_size = sz;
- ring->vaddr = (void *)(ring + 1);
+ i915_timeline_init(engine->i915, &ring->timeline, engine->name);
+
+ ring->base.size = sz;
+ ring->base.effective_size = sz;
+ ring->base.vaddr = (void *)(ring + 1);
+ ring->base.timeline = &ring->timeline;
- INIT_LIST_HEAD(&ring->request_list);
- intel_ring_update_space(ring);
+ INIT_LIST_HEAD(&ring->base.request_list);
+ intel_ring_update_space(&ring->base);
- return ring;
+ return &ring->base;
+}
+
+static void mock_ring_free(struct intel_ring *base)
+{
+ struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+ i915_timeline_fini(&ring->timeline);
+ kfree(ring);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -155,12 +178,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
if (!engine)
return NULL;
- engine->base.buffer = mock_ring(&engine->base);
- if (!engine->base.buffer) {
- kfree(engine);
- return NULL;
- }
-
/* minimal engine setup for requests */
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
@@ -174,9 +191,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- engine->base.timeline =
- &i915->gt.global_timeline.engine[engine->base.id];
-
+ i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
@@ -185,7 +200,17 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
+ engine->base.buffer = mock_ring(&engine->base);
+ if (!engine->base.buffer)
+ goto err_breadcrumbs;
+
return &engine->base;
+
+err_breadcrumbs:
+ intel_engine_fini_breadcrumbs(&engine->base);
+ i915_timeline_fini(&engine->base.timeline);
+ kfree(engine);
+ return NULL;
}
void mock_engine_flush(struct intel_engine_cs *engine)
@@ -217,10 +242,12 @@ void mock_engine_free(struct intel_engine_cs *engine)
GEM_BUG_ON(timer_pending(&mock->hw_delay));
if (engine->last_retired_context)
- engine->context_unpin(engine, engine->last_retired_context);
+ intel_context_unpin(engine->last_retired_context, engine);
+
+ mock_ring_free(engine->buffer);
intel_engine_fini_breadcrumbs(engine);
+ i915_timeline_fini(&engine->timeline);
- kfree(engine->buffer);
kfree(engine);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e6d4b882599a..94baedfa0f74 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -44,6 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
mock_engine_flush(engine);
i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
}
static void mock_device_release(struct drm_device *dev)
@@ -72,8 +73,8 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
mock_fini_ggtt(i915);
- i915_gem_timeline_fini(&i915->gt.global_timeline);
mutex_unlock(&i915->drm.struct_mutex);
+ WARN_ON(!list_empty(&i915->gt.timelines));
destroy_workqueue(i915->wq);
@@ -223,26 +224,25 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->priorities)
goto err_dependencies;
- mutex_lock(&i915->drm.struct_mutex);
INIT_LIST_HEAD(&i915->gt.timelines);
- err = i915_gem_timeline_init__global(i915);
- if (err) {
- mutex_unlock(&i915->drm.struct_mutex);
- goto err_priorities;
- }
+ INIT_LIST_HEAD(&i915->gt.active_rings);
+ INIT_LIST_HEAD(&i915->gt.closed_vma);
+
+ mutex_lock(&i915->drm.struct_mutex);
mock_init_ggtt(i915);
- mutex_unlock(&i915->drm.struct_mutex);
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->engine[RCS] = mock_engine(i915, "mock", RCS);
if (!i915->engine[RCS])
- goto err_priorities;
+ goto err_unlock;
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
goto err_engine;
+ mutex_unlock(&i915->drm.struct_mutex);
+
WARN_ON(i915_gemfs_init(i915));
return i915;
@@ -250,7 +250,8 @@ struct drm_i915_private *mock_gem_device(void)
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
-err_priorities:
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
kmem_cache_destroy(i915->priorities);
err_dependencies:
kmem_cache_destroy(i915->dependencies);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e96873f96116..36c112088940 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -76,7 +76,6 @@ mock_ppgtt(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ppgtt->base.global_link);
drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
- i915_gem_timeline_init(i915, &ppgtt->base.timeline, name);
ppgtt->base.clear_range = nop_clear_range;
ppgtt->base.insert_page = mock_insert_page;
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index 47b1f47c5812..dcf3b16f5a07 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -1,45 +1,28 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017-2018 Intel Corporation
*/
+#include "../i915_timeline.h"
+
#include "mock_timeline.h"
-struct intel_timeline *mock_timeline(u64 context)
+void mock_timeline_init(struct i915_timeline *timeline, u64 context)
{
- static struct lock_class_key class;
- struct intel_timeline *tl;
+ timeline->fence_context = context;
+
+ spin_lock_init(&timeline->lock);
- tl = kzalloc(sizeof(*tl), GFP_KERNEL);
- if (!tl)
- return NULL;
+ init_request_active(&timeline->last_request, NULL);
+ INIT_LIST_HEAD(&timeline->requests);
- __intel_timeline_init(tl, NULL, context, &class, "mock");
+ i915_syncmap_init(&timeline->sync);
- return tl;
+ INIT_LIST_HEAD(&timeline->link);
}
-void mock_timeline_destroy(struct intel_timeline *tl)
+void mock_timeline_fini(struct i915_timeline *timeline)
{
- __intel_timeline_fini(tl);
- kfree(tl);
+ i915_timeline_fini(timeline);
}
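
mock_timeline_init()/mock_timeline_fini() now operate on a caller-embedded
struct i915_timeline instead of allocating one, initialising only the
fields the mocks exercise (fence context, lock, request list, syncmap,
link). A minimal usage sketch:

	struct i915_timeline tl;

	mock_timeline_init(&tl, 1); /* 1: arbitrary fence context id */
	/* ... drive requests against tl ... */
	mock_timeline_fini(&tl);
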
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
index c27ff4639b8b..b6deaa61110d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.h
@@ -1,33 +1,15 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017-2018 Intel Corporation
*/
#ifndef __MOCK_TIMELINE__
#define __MOCK_TIMELINE__
-#include "../i915_gem_timeline.h"
+struct i915_timeline;
-struct intel_timeline *mock_timeline(u64 context);
-void mock_timeline_destroy(struct intel_timeline *tl);
+void mock_timeline_init(struct i915_timeline *timeline, u64 context);
+void mock_timeline_fini(struct i915_timeline *timeline);
#endif /* !__MOCK_TIMELINE__ */