Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 442
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 443
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 696
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 137
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 114
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 145
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 170
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 181
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 410
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 133
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 237
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 246
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 130
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2173
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 276
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 204
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 835
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 437
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h | 78
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 322
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 701
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 385
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1097
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 308
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen6.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen7.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen8.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen9.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 289
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 394
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 1148
66 files changed, 7659 insertions, 5767 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4e39ab34eb1c..74acca9bcd9d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,8 @@ config DRM_I915
select SHMEM
select TMPFS
select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_MIPI_DSI
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e4083e41a600..f01922591679 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
+ i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
@@ -47,6 +48,7 @@ i915-y += intel_renderstate_gen6.o \
i915-y += intel_audio.o \
intel_bios.o \
intel_display.o \
+ intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
intel_modes.o \
@@ -64,11 +66,12 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ intel_atomic.o \
+ intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
intel_dp_mst.o \
- intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 22c992a78ac6..806e812340d0 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -152,6 +152,7 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -272,6 +275,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -401,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
#define REG64(addr) (addr), (addr + sizeof(u32))
static const u32 gen7_render_regs[] = {
+ REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+ u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
- if (subclient == INSTR_MEDIA_SUBCLIENT)
- return 0xFFF;
- else
+ if (subclient == INSTR_MEDIA_SUBCLIENT) {
+ if (op == 6)
+ return 0xFFFF;
+ else
+ return 0xFFF;
+ } else
return 0xFF;
}
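
For reference: the mask returned by this helper selects the DWord-length
field of a command header, and the parser applies it roughly as in the
sketch below (a minimal illustration — the helper name is made up, but the
+2 bias matches the parser's LENGTH_BIAS):

	/*
	 * Sketch: decode a command's total length in DWords from its
	 * header, given the per-client/subclient mask computed above.
	 * The length field encodes "total DWords - 2" on these rings.
	 */
	static u32 sketch_cmd_length(u32 cmd_header, u32 length_mask)
	{
		return (cmd_header & length_mask) + 2;
	}

The new op == 6 case therefore only widens the length field to 16 bits for
those media commands; the decode formula itself is unchanged.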
@@ -716,13 +725,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
- if (hash_empty(ring->cmd_hash)) {
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
- if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
- return ret;
- }
+ WARN_ON(!hash_empty(ring->cmd_hash));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
}
ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@ finish:
return (u32*)addr;
}
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+ struct drm_i915_gem_object *src_obj,
+ u32 batch_start_offset,
+ u32 batch_len)
+{
+ int ret = 0;
+ int needs_clflush = 0;
+ u32 *src_base, *dest_base = NULL;
+ u32 *src_addr, *dest_addr;
+ u32 offset = batch_start_offset / sizeof(*dest_addr);
+ u32 end = batch_start_offset + batch_len;
+
+ if (end > dest_obj->base.size || end > src_obj->base.size)
+ return ERR_PTR(-E2BIG);
+
+ ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ERR_PTR(ret);
+ }
+
+ src_base = vmap_batch(src_obj);
+ if (!src_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ ret = -ENOMEM;
+ goto unpin_src;
+ }
+
+ src_addr = src_base + offset;
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)src_addr, batch_len);
+
+ ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ goto unmap_src;
+ }
+
+ dest_base = vmap_batch(dest_obj);
+ if (!dest_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ ret = -ENOMEM;
+ goto unmap_src;
+ }
+
+ dest_addr = dest_base + offset;
+
+ if (batch_start_offset != 0)
+ memset((u8 *)dest_base, 0, batch_start_offset);
+
+ memcpy(dest_addr, src_addr, batch_len);
+ memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+ vunmap(src_base);
+unpin_src:
+ i915_gem_object_unpin_pages(src_obj);
+
+ return ret ? ERR_PTR(ret) : dest_base;
+}
+
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
@@ -956,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
- int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
- ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
- return ret;
+ DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+ return -1;
}
- batch_base = vmap_batch(batch_obj);
- if (!batch_base) {
- DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
- i915_gem_object_unpin_pages(batch_obj);
- return -ENOMEM;
+ batch_base = copy_batch(shadow_batch_obj, batch_obj,
+ batch_start_offset, batch_len);
+ if (IS_ERR(batch_base)) {
+ DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ return PTR_ERR(batch_base);
}
- if (needs_clflush)
- drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
- batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+ /*
+ * We use the batch length as size because the shadow object is as
+ * large or larger and copy_batch() will write MI_NOPs to the extra
+ * space. Parsing should be faster in some cases this way.
+ */
+ batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
-
- i915_gem_object_unpin_pages(batch_obj);
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
return ret;
}
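
For context, the execbuffer path (updated elsewhere in this series) drives
the new signature roughly as sketched below — a simplified flow that
assumes the batch-pool helper added by this series, with error handling
trimmed:

	/*
	 * Sketch: take a shadow object from the batch pool, then hand both
	 * objects to the parser, which copies and validates the commands.
	 */
	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
						   batch_obj->base.size);
	if (IS_ERR(shadow_batch_obj))
		return PTR_ERR(shadow_batch_obj);

	ret = i915_parse_cmds(ring, batch_obj, shadow_batch_obj,
			      args->batch_start_offset,
			      args->batch_len,
			      file->is_master);

On success it is the shadow copy, not the client-visible object, that gets
executed, so userspace cannot rewrite the commands after validation.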
@@ -1076,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
+ * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
*/
- return 2;
+ return 3;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 779a275eb1fd..96e811fe24ca 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,9 +96,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj->user_pin_count > 0)
- return "P";
- else if (i915_gem_obj_is_pinned(obj))
+ if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -125,7 +123,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -133,9 +131,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_read_seqno,
- obj->last_write_seqno,
- obj->last_fenced_seqno,
+ i915_gem_request_get_seqno(obj->last_read_req),
+ i915_gem_request_get_seqno(obj->last_write_req),
+ i915_gem_request_get_seqno(obj->last_fenced_req),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -154,8 +152,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08lx, size: %08lx)",
- vma->node.start, vma->node.size);
+ seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+ vma->node.start, vma->node.size,
+ vma->ggtt_view.type);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
@@ -168,8 +167,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->ring != NULL)
- seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->last_read_req != NULL)
+ seq_printf(m, " (%s)",
+ i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -336,7 +336,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (ppgtt->file_priv != stats->file_priv)
continue;
- if (obj->ring) /* XXX per-vma statistic */
+ if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -346,7 +346,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
- if (obj->ring)
+ if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -360,6 +360,33 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
+#define print_file_stats(m, name, stats) \
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ name, \
+ stats.count, \
+ stats.total, \
+ stats.active, \
+ stats.inactive, \
+ stats.global, \
+ stats.shared, \
+ stats.unbound)
+
+static void print_batch_pool_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+ struct file_stats stats;
+
+ memset(&stats, 0, sizeof(stats));
+
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list)
+ per_file_stats(0, obj, &stats);
+
+ print_file_stats(m, "batch pool", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -442,6 +469,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
+ print_batch_pool_stats(m, dev_priv);
+
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -459,15 +489,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
- task ? task->comm : "<unknown>",
- stats.count,
- stats.total,
- stats.active,
- stats.inactive,
- stats.global,
- stats.shared,
- stats.unbound);
+ print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -543,14 +565,16 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
- if (work->flip_queued_ring) {
- seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
- work->flip_queued_ring->name,
- work->flip_queued_seqno,
+ if (work->flip_queued_req) {
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(work->flip_queued_req);
+
+ seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+ ring->name,
+ i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno));
+ ring->get_seqno(ring, true),
+ i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -582,6 +606,36 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int count = 0;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_puts(m, "cache:\n");
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list) {
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ count++;
+ }
+
+ seq_printf(m, "total: %d\n", count);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -604,7 +658,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
list_for_each_entry(gem_request,
&ring->request_list,
list) {
- seq_printf(m, " %d @ %d\n",
+ seq_printf(m, " %x @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
@@ -622,7 +676,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %u\n",
+ seq_printf(m, "Current sequence (%s): %x\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -1051,7 +1105,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
if (ret)
goto out;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1059,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf >>= 24;
else
reqf >>= 25;
- reqf *= GT_FREQUENCY_MULTIPLIER;
+ reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -1076,9 +1130,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- cagf *= GT_FREQUENCY_MULTIPLIER;
+ cagf = intel_gpu_freq(dev_priv, cagf);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (IS_GEN6(dev) || IS_GEN7(dev)) {
@@ -1124,18 +1178,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1145,16 +1199,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
- seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
@@ -1165,6 +1220,53 @@ out:
return ret;
}
+static int i915_hangcheck_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u64 acthd[I915_NUM_RINGS];
+ u32 seqno[I915_NUM_RINGS];
+ int i;
+
+ if (!i915.enable_hangcheck) {
+ seq_printf(m, "Hangcheck disabled\n");
+ return 0;
+ }
+
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_ring(ring, dev_priv, i) {
+ seqno[i] = ring->get_seqno(ring, false);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
+ seq_printf(m, "Hangcheck active, fires in %dms\n",
+ jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+ jiffies));
+ } else
+ seq_printf(m, "Hangcheck inactive\n");
+
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "%s:\n", ring->name);
+ seq_printf(m, "\tseqno = %x [current %x]\n",
+ ring->hangcheck.seqno, seqno[i]);
+ seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+ (long long)ring->hangcheck.acthd,
+ (long long)acthd[i]);
+ seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
+ (long long)ring->hangcheck.max_acthd);
+ seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+ seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1234,14 +1336,31 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
-static int vlv_drpc_info(struct seq_file *m)
+static int i915_forcewake_domains(struct seq_file *m, void *data)
{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ int i;
+ spin_lock_irq(&dev_priv->uncore.lock);
+ for_each_fw_domain(fw_domain, dev_priv, i) {
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(i),
+ fw_domain->wake_count);
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return 0;
+}
+
+static int vlv_drpc_info(struct seq_file *m)
+{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1, pw_status;
- unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
@@ -1273,22 +1392,11 @@ static int vlv_drpc_info(struct seq_file *m)
seq_printf(m, "Media RC6 residency since boot: %u\n",
I915_READ(VLV_GT_MEDIA_RC6));
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
- seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
-
-
- return 0;
+ return i915_forcewake_domains(m, NULL);
}
-
static int gen6_drpc_info(struct seq_file *m)
{
-
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1302,7 +1410,7 @@ static int gen6_drpc_info(struct seq_file *m)
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.forcewake_count;
+ forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
@@ -1617,7 +1725,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- gpu_freq * GT_FREQUENCY_MULTIPLIER,
+ intel_gpu_freq(dev_priv, gpu_freq),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1874,7 +1982,7 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
- struct intel_ctx_submit_request *head_req = NULL;
+ struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
@@ -1907,7 +2015,7 @@ static int i915_execlists(struct seq_file *m, void *data)
list_for_each(cursor, &ring->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request, execlist_link);
+ struct drm_i915_gem_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
@@ -1930,30 +2038,6 @@ static int i915_execlists(struct seq_file *m, void *data)
return 0;
}
-static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
-
- spin_lock_irq(&dev_priv->uncore.lock);
- if (IS_VALLEYVIEW(dev)) {
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- } else
- forcewake_count = dev_priv->uncore.forcewake_count;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- if (IS_VALLEYVIEW(dev)) {
- seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
- seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
- } else
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2155,6 +2239,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 psrperf = 0;
+ u32 stat[3];
+ enum pipe pipe;
bool enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -2169,14 +2255,39 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- enabled = HAS_PSR(dev) &&
- I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+ if (HAS_PSR(dev)) {
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
+ }
+ }
+ }
+ seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
+
+ if (!HAS_DDI(dev))
+ for_each_pipe(dev_priv, pipe) {
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ seq_printf(m, " pipe %c", pipe_name(pipe));
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Link standby: %s\n",
+ yesno((bool)dev_priv->psr.link_standby));
- if (HAS_PSR(dev))
+ /* CHV PSR has no kind of performance counter */
+ if (HAS_PSR(dev) && HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
+
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ }
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2319,10 +2430,18 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUDIO";
case POWER_DOMAIN_PLLS:
return "PLLS";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
case POWER_DOMAIN_INIT:
return "INIT";
default:
- WARN_ON(1);
+ MISSING_CASE(domain);
return "?";
}
}
@@ -2547,7 +2666,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
+ yesno(crtc->active), crtc->config->pipe_src_w,
+ crtc->config->pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
@@ -2718,6 +2838,9 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
enum pipe pipe;
int plane;
+ if (INTEL_INFO(dev)->gen < 9)
+ return 0;
+
drm_modeset_lock_all(dev);
ddb = &dev_priv->wm.skl_hw.ddb;
@@ -2830,7 +2953,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
- int head, tail, n_entries, n;
+ int n_entries;
ssize_t bytes_read;
/*
@@ -2862,36 +2985,39 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
}
/* We now have one or more entries to read */
- head = pipe_crc->head;
- tail = pipe_crc->tail;
- n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
- count / PIPE_CRC_LINE_LEN);
- spin_unlock_irq(&pipe_crc->lock);
+ n_entries = count / PIPE_CRC_LINE_LEN;
bytes_read = 0;
- n = 0;
- do {
- struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ while (n_entries > 0) {
+ struct intel_pipe_crc_entry *entry =
+ &pipe_crc->entries[pipe_crc->tail];
int ret;
+ if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+ break;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
"%8u %8x %8x %8x %8x %8x\n",
entry->frame, entry->crc[0],
entry->crc[1], entry->crc[2],
entry->crc[3], entry->crc[4]);
- ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
- buf, PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
if (ret == PIPE_CRC_LINE_LEN)
return -EFAULT;
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- n++;
- } while (--n_entries);
+ user_buf += PIPE_CRC_LINE_LEN;
+ n_entries--;
+
+ spin_lock_irq(&pipe_crc->lock);
+ }
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->tail = tail;
spin_unlock_irq(&pipe_crc->lock);
return bytes_read;
@@ -3072,6 +3198,12 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
need_stable_symbols = true;
break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
@@ -3092,11 +3224,19 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp |= PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp |= PIPE_B_SCRAMBLE_RESET;
-
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
I915_WRITE(PORT_DFT2_G4X, tmp);
}
@@ -3185,10 +3325,19 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
@@ -3252,9 +3401,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config.pch_pfit.enabled) {
- crtc->config.pch_pfit.force_thru = true;
+ if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config->pch_pfit.enabled) {
+ crtc->config->pch_pfit.force_thru = true;
intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3278,8 +3427,8 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.pch_pfit.force_thru) {
- crtc->config.pch_pfit.force_thru = false;
+ if (crtc->config->pch_pfit.force_thru) {
+ crtc->config->pch_pfit.force_thru = false;
dev_priv->display.crtc_disable(&crtc->base);
dev_priv->display.crtc_enable(&crtc->base);
@@ -3359,13 +3508,15 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* none -> real source transition */
if (source) {
+ struct intel_pipe_crc_entry *entries;
+
DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
pipe_name(pipe), pipe_crc_source_name(source));
- pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
- INTEL_PIPE_CRC_ENTRIES_NR,
- GFP_KERNEL);
- if (!pipe_crc->entries)
+ entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+ sizeof(pipe_crc->entries[0]),
+ GFP_KERNEL);
+ if (!entries)
return -ENOMEM;
/*
@@ -3377,6 +3528,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
+ kfree(pipe_crc->entries);
+ pipe_crc->entries = entries;
pipe_crc->head = 0;
pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
@@ -3404,6 +3557,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
pipe_crc->entries = NULL;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
kfree(entries);
@@ -3826,6 +3981,17 @@ i915_wedged_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * There is no safeguard against this debugfs entry colliding
+ * with the hangcheck calling same i915_handle_error() in
+ * parallel, causing an explosion. For now we assume that the
+ * test harness is responsible enough not to inject gpu hangs
+ * while it is writing to 'i915_wedged'
+ */
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return -EAGAIN;
+
intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val,
@@ -4012,10 +4178,7 @@ i915_max_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4044,12 +4207,12 @@ i915_max_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go above the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4093,10 +4256,7 @@ i915_min_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4125,12 +4285,12 @@ i915_min_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go below the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4222,7 +4382,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
return 0;
}
@@ -4235,7 +4396,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -4296,7 +4458,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+ {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
+ {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
@@ -4308,7 +4472,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_execlists", i915_execlists, 0},
- {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ecee3bcc8772..1a46787129e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -92,6 +92,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]);
break;
+ case I915_PARAM_HAS_BSD2:
+ value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+ break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
@@ -143,6 +146,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
break;
+ case I915_PARAM_MMAP_VERSION:
+ value = 1;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -598,6 +604,17 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_pipes = 0;
}
}
+
+ if (IS_CHERRYVIEW(dev)) {
+ u32 fuse, mask_eu;
+
+ fuse = I915_READ(CHV_FUSE_GT);
+ mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK |
+ CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total = 16 - hweight32(mask_eu);
+ }
}
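
The arithmetic works because Cherryview has 16 execution units (two
subslices, two rows of four EUs each) and every set bit in the four
row-disable fuse fields disables exactly one EU, so a population count of
the masked fuse value gives the disabled-EU total. A worked example with a
hypothetical fuse value, for illustration only:

	/* Illustration: a masked fuse with two EU-disable bits set. */
	u32 mask_eu = 0x22;					/* two bits set */
	unsigned int eu_total = 16 - hweight32(mask_eu);	/* 16 - 2 = 14 */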
/**
@@ -773,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
+ dev_priv->gpu_error.hangcheck_wq =
+ alloc_ordered_workqueue("i915-hangcheck", 0);
+ if (dev_priv->gpu_error.hangcheck_wq == NULL) {
+ DRM_ERROR("Failed to create our hangcheck workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freedpwq;
+ }
+
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
@@ -830,6 +855,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_runtime_pm_enable(dev_priv);
+ i915_audio_component_init(dev_priv);
+
return 0;
out_power_well:
@@ -845,6 +872,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+out_freedpwq:
destroy_workqueue(dev_priv->dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
@@ -870,6 +899,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ i915_audio_component_cleanup(dev_priv);
+
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -913,8 +944,7 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
- cancel_work_sync(&dev_priv->gpu_error.work);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -928,6 +958,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -938,6 +969,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev);
@@ -1004,6 +1036,13 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1064,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -1055,6 +1094,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..8039cec71fc2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
@@ -841,6 +835,8 @@ int i915_reset(struct drm_device *dev)
return ret;
}
+ intel_overlay_reset(dev_priv);
+
/* Ok, now get things going again... */
/*
@@ -940,8 +936,7 @@ static int i915_pm_suspend(struct device *dev)
static int i915_pm_suspend_late(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -960,8 +955,7 @@ static int i915_pm_suspend_late(struct device *dev)
static int i915_pm_resume_early(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -971,8 +965,7 @@ static int i915_pm_resume_early(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1299,7 +1292,9 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
- vlv_save_gunit_s0ix_state(dev_priv);
+
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -1330,7 +1325,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- vlv_restore_gunit_s0ix_state(dev_priv);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -1363,8 +1359,6 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
- assert_force_wake_inactive(dev_priv);
-
DRM_DEBUG_KMS("Suspending device\n");
/*
@@ -1402,7 +1396,8 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true;
/*
@@ -1430,6 +1425,8 @@ static int intel_runtime_suspend(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D3hot);
}
+ assert_forcewakes_inactive(dev_priv);
+
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -1640,6 +1637,14 @@ static int __init i915_init(void)
#endif
}
+ /*
+ * FIXME: Note that we're lying to the DRM core here so that we can get access
+ * to the atomic ioctl and the atomic properties. Only plane operations on
+ * a single CRTC will actually work.
+ */
+ if (i915.nuclear_pageflip)
+ driver.driver_features |= DRIVER_ATOMIC;
+
return drm_pci_init(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..f2a825e39646 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,10 +55,51 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20141121"
+#define DRIVER_DATE "20150130"
#undef WARN_ON
-#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
+/* Many gcc versions seem to not see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#endif
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long) (x), __func__);
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ WARN(1, format); \
+ else \
+ DRM_ERROR(format); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ WARN(1, "WARN_ON(" #condition ")\n"); \
+ else \
+ DRM_ERROR("WARN_ON(" #condition ")\n"); \
+ } \
+ unlikely(__ret_warn_on); \
+})
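
A minimal usage sketch (illustrative only — the assert helper below is
invented for this note, while I915_READ, DPLL() and pipe_name() are
existing driver facilities):

	/*
	 * Sketch: hw state checks use I915_STATE_WARN instead of WARN so
	 * that i915.verbose_state_checks selects between a full backtrace
	 * and a plain DRM_ERROR line.
	 */
	static void assert_pll_enabled_sketch(struct drm_i915_private *dev_priv,
					      enum pipe pipe)
	{
		u32 val = I915_READ(DPLL(pipe));

		I915_STATE_WARN(!(val & DPLL_VCO_ENABLE),
				"PLL for pipe %c expected enabled\n",
				pipe_name(pipe));
	}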
enum pipe {
INVALID_PIPE = -1,
@@ -143,6 +184,10 @@ enum intel_display_power_domain {
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -458,8 +503,8 @@ struct drm_i915_error_state {
struct intel_connector;
struct intel_encoder;
-struct intel_crtc_config;
-struct intel_plane_config;
+struct intel_crtc_state;
+struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
@@ -497,10 +542,11 @@ struct drm_i915_display_funcs {
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_config *);
- void (*get_plane_config)(struct intel_crtc *,
- struct intel_plane_config *);
- int (*crtc_compute_clock)(struct intel_crtc *crtc);
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
@@ -533,11 +579,28 @@ struct drm_i915_display_funcs {
void (*enable_backlight)(struct intel_connector *connector);
};
+enum forcewake_domain_id {
+ FW_DOMAIN_ID_RENDER = 0,
+ FW_DOMAIN_ID_BLITTER,
+ FW_DOMAIN_ID_MEDIA,
+
+ FW_DOMAIN_ID_COUNT
+};
+
+enum forcewake_domains {
+ FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_ALL = (FORCEWAKE_RENDER |
+ FORCEWAKE_BLITTER |
+ FORCEWAKE_MEDIA)
+};
+
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -560,14 +623,31 @@ struct intel_uncore {
struct intel_uncore_funcs funcs;
unsigned fifo_count;
- unsigned forcewake_count;
-
- unsigned fw_rendercount;
- unsigned fw_mediacount;
- unsigned fw_blittercount;
-
- struct timer_list force_wake_timer;
-};
+ enum forcewake_domains fw_domains;
+
+ struct intel_uncore_forcewake_domain {
+ struct drm_i915_private *i915;
+ enum forcewake_domain_id id;
+ unsigned wake_count;
+ struct timer_list timer;
+ u32 reg_set;
+ u32 val_set;
+ u32 val_clear;
+ u32 reg_ack;
+ u32 reg_post;
+ u32 val_reset;
+ } fw_domain[FW_DOMAIN_ID_COUNT];
+};
+
+/* Iterate over initialised fw domains */
+#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
+ for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+ (i__) < FW_DOMAIN_ID_COUNT; \
+ (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
+ if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+
+#define for_each_fw_domain(domain__, dev_priv__, i__) \
+ for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
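
These iterators follow the same pattern as for_each_ring; a short sketch
mirroring the debugfs dump added earlier in this patch:

	/*
	 * Sketch: walk every initialised forcewake domain and report how
	 * many outstanding wake references each one holds.
	 */
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	for_each_fw_domain(fw_domain, dev_priv, i)
		DRM_DEBUG_DRIVER("fw domain %d wake_count=%u\n",
				 i, fw_domain->wake_count);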
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
@@ -612,6 +692,7 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
+ unsigned int eu_total;
};
#undef DEFINE_FLAG
@@ -637,6 +718,11 @@ struct i915_ctx_hang_stats {
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
+ /* If the context causes a second GPU hang within this time,
+ * it is permanently banned from submitting any more work.
+ */
+ unsigned long ban_period_seconds;
+
/* This context is banned to submit more work */
bool banned;
};
@@ -679,7 +765,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int unpin_count;
+ int pin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -730,11 +816,33 @@ struct i915_fbc {
} no_fbc_reason;
};
-struct i915_drrs {
- struct intel_connector *connector;
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for same resolution.
+ */
+enum drrs_refresh_rate_type {
+ DRRS_HIGH_RR,
+ DRRS_LOW_RR,
+ DRRS_MAX_RR, /* RR count */
+};
+
+enum drrs_support_type {
+ DRRS_NOT_SUPPORTED = 0,
+ STATIC_DRRS_SUPPORT = 1,
+ SEAMLESS_DRRS_SUPPORT = 2
};
struct intel_dp;
+struct i915_drrs {
+ struct mutex mutex;
+ struct delayed_work work;
+ struct intel_dp *dp;
+ unsigned busy_frontbuffer_bits;
+ enum drrs_refresh_rate_type refresh_rate_type;
+ enum drrs_support_type type;
+};
+
struct i915_psr {
struct mutex lock;
bool sink_support;
@@ -743,6 +851,7 @@ struct i915_psr {
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
+ bool link_standby;
};
enum intel_pch {
@@ -1130,6 +1239,11 @@ struct intel_l3_parity {
int which_slice;
};
+struct i915_gem_batch_pool {
+ struct drm_device *dev;
+ struct list_head cache_list;
+};
+
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1143,6 +1257,13 @@ struct i915_gem_mm {
*/
struct list_head unbound_list;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1224,14 +1345,13 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
- struct timer_list hangcheck_timer;
+ struct workqueue_struct *hangcheck_wq;
+ struct delayed_work hangcheck_work;
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
- struct work_struct work;
-
unsigned long missed_irq_rings;
@@ -1301,10 +1421,11 @@ struct ddi_vbt_port_info {
uint8_t supports_dp:1;
};
-enum drrs_support_type {
- DRRS_NOT_SUPPORTED = 0,
- STATIC_DRRS_SUPPORT = 1,
- SEAMLESS_DRRS_SUPPORT = 2
+enum psr_lines_to_wait {
+ PSR_0_LINES_TO_WAIT = 0,
+ PSR_1_LINE_TO_WAIT,
+ PSR_4_LINES_TO_WAIT,
+ PSR_8_LINES_TO_WAIT
};
struct intel_vbt_data {
@@ -1336,6 +1457,15 @@ struct intel_vbt_data {
struct edp_power_seq edp_pps;
struct {
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ enum psr_lines_to_wait lines_to_wait;
+ int tp1_wakeup_time;
+ int tp2_tp3_wakeup_time;
+ } psr;
+
+ struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
@@ -1698,6 +1828,9 @@ struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
+ /* hda/i915 audio component */
+ bool audio_component_registered;
+
uint32_t hw_context_size;
struct list_head context_list;
@@ -1770,6 +1903,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ uint32_t request_uniq;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1781,6 +1916,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return dev->dev_private;
}
+static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+{
+ return to_i915(dev_get_drvdata(dev));
+}
+
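dev_to_i915() is aimed at callbacks that only receive a struct device, such as sysfs attributes or the audio component; a hedged usage sketch (example_show is a hypothetical attribute):

static ssize_t example_show(struct device *kdev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = dev_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%u\n", dev_priv->hw_context_size);
}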
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1853,6 +1993,8 @@ struct drm_i915_gem_object {
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
+ struct list_head batch_pool_list;
+
/**
* This is set if the object is on the active lists (has pending
 * rendering and so a non-zero seqno), and is not set if it is on
@@ -1912,6 +2054,7 @@ struct drm_i915_gem_object {
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
unsigned int has_dma_mapping:1;
@@ -1924,13 +2067,11 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_engine_cs *ring;
-
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_read_seqno;
- uint32_t last_write_seqno;
+ struct drm_i915_gem_request *last_read_req;
+ struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
- uint32_t last_fenced_seqno;
+ struct drm_i915_gem_request *last_fenced_req;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -1941,10 +2082,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- /** User space pin count and filp owning the pin */
- unsigned long user_pin_count;
- struct drm_file *pin_filp;
-
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
@@ -1973,11 +2110,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
*/
struct drm_i915_gem_request {
+ struct kref ref;
+
 /** On which ring this request was generated */
struct intel_engine_cs *ring;
@@ -1987,7 +2127,14 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the start of the request */
u32 head;
- /** Position in the ringbuffer of the end of the request */
+ /**
+ * Position in the ringbuffer of the start of the postfix.
+ * This is required to calculate the maximum available ringbuffer
+ * space without overwriting the postfix.
+ */
+ u32 postfix;
+
+ /** Position in the ringbuffer of the end of the whole request */
u32 tail;
/** Context related to this request */
@@ -2005,8 +2152,75 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
+
+ uint32_t uniq;
+
+ /**
+ * The ELSP only accepts two elements at a time, so we queue
+ * context/tail pairs on a given queue (ring->execlist_queue) until the
+ * hardware is available. The queue serves a double purpose: we also use
+ * it to keep track of the up to 2 contexts currently in the hardware
+ * (usually one in execution and the other queued up by the GPU): We
+ * only remove elements from the head of the queue when the hardware
+ * informs us that an element has been completed.
+ *
+ * All accesses to the queue are mediated by a spinlock
+ * (ring->execlist_lock).
+ */
+
+ /** Execlist link in the submission queue. */
+ struct list_head execlist_link;
+
+ /** Execlists: number of times this request has been sent to the ELSP */
+ int elsp_submitted;
+
};
+void i915_gem_request_free(struct kref *req_ref);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+ return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+ return req ? req->ring : NULL;
+}
+
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+ kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+ kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+ struct drm_i915_gem_request *src)
+{
+ if (src)
+ i915_gem_request_reference(src);
+
+ if (*pdst)
+ i915_gem_request_unreference(*pdst);
+
+ *pdst = src;
+}
+
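i915_gem_request_assign() bundles the reference dance when retargeting a tracked request pointer; a hedged sketch of the two idiomatic uses (field names follow the GEM object changes earlier in this diff):

/* The helper takes the new reference before dropping the old one,
 * so src == *pdst is safe; assigning NULL is the "clear" form. */
static void example_track_write(struct drm_i915_gem_object *obj,
				struct drm_i915_gem_request *req)
{
	i915_gem_request_assign(&obj->last_write_req, req);
}

static void example_untrack_write(struct drm_i915_gem_object *obj)
{
	i915_gem_request_assign(&obj->last_write_req, NULL);
}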
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
@@ -2159,8 +2373,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
@@ -2240,7 +2453,9 @@ struct drm_i915_cmd_table {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2310,6 +2525,8 @@ struct i915_params {
bool disable_vtd_wa;
int use_mmio_flip;
bool mmio_debug;
+ bool verbose_state_checks;
+ bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;
@@ -2354,6 +2571,12 @@ extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
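A hedged sketch of the new explicit wakeref API in use (illustrative only; real callers should pick the narrowest domain set covering the registers they touch):

static void example_locked_mmio(struct drm_i915_private *dev_priv)
{
	/* Keep the render well awake across a raw register sequence. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	/* ... raw I915_READ/I915_WRITE accesses ... */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
}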
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2410,10 +2633,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -2458,10 +2677,23 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
+int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view);
+static inline
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- uint64_t flags);
+ uint64_t flags)
+{
+ return i915_gem_object_pin_view(obj, vm, alignment, flags,
+ &i915_ggtt_view_normal);
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2510,6 +2742,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno;
+
+ BUG_ON(req == NULL);
+
+ seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+ return i915_seqno_passed(seqno, req->seqno);
+}
+
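A hedged usage sketch for the completion helper (example_may_skip_wait is illustrative only): a true result means a subsequent i915_wait_request() would not block.

static bool example_may_skip_wait(struct drm_i915_gem_request *req)
{
	/* lazy_coherency == true tolerates a slightly stale seqno read;
	 * pass false when a fully coherent answer is required. */
	return req == NULL || i915_gem_request_completed(req, true);
}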
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@ -2525,7 +2769,7 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2568,17 +2812,15 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *batch_obj,
- u32 *seqno);
-#define i915_add_request(ring, seqno) \
- __i915_add_request(ring, NULL, NULL, seqno)
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ struct drm_i915_gem_object *batch_obj);
+#define i915_add_request(ring) \
+ __i915_add_request(ring, NULL, NULL)
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
- uint32_t seqno);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -2612,18 +2854,51 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
+
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+static inline
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
+ &i915_ggtt_view_normal);
+}
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
@@ -2720,6 +2995,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -2805,6 +3084,13 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object*
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
+
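A hedged sketch of the intended call pattern for the batch pool (the command parser declarations below are the real consumer; the ERR_PTR return convention is an assumption of this sketch):

/* Borrow a shadow object of at least `size` bytes; idle objects
 * are recycled from pool->cache_list rather than freshly allocated. */
static struct drm_i915_gem_object *
example_get_shadow(struct drm_i915_private *dev_priv, size_t size)
{
	return i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, size);
}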
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@ -2812,7 +3098,9 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master);
/* i915_suspend.c */
@@ -2892,9 +3180,6 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
-extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@ -2923,20 +3208,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-/* On SNB platform, before reading ring registers forcewake bit
- * must be set to prevent GT core from power down and stale values being
- * returned.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
@@ -2957,15 +3234,8 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
-
-#define FORCEWAKE_RENDER (1 << 0)
-#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_BLITTER (1 << 2)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
- FORCEWAKE_BLITTER)
-
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@ -3070,4 +3340,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req)
+{
+ if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+ i915_gem_request_assign(&ring->trace_irq_req, req);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c11603b4cf1d..c26d36cc4b31 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,8 +39,7 @@
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
@@ -153,12 +152,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
return 0;
}
-static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_bound_any(obj) && !obj->active;
-}
-
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -1157,19 +1150,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
}
/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare an arbitrary request against the outstanding lazy request; emit on match.
*/
int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
{
int ret;
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
ret = 0;
- if (seqno == ring->outstanding_lazy_seqno)
- ret = i915_add_request(ring, NULL);
+ if (req == req->ring->outstanding_lazy_request)
+ ret = i915_add_request(req->ring);
return ret;
}
@@ -1194,10 +1186,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait on
+ * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1208,15 +1199,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with the remaining time filled in the timeout argument.
*/
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
{
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
@@ -1228,7 +1220,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ?
@@ -1246,7 +1238,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
- trace_i915_gem_request_wait_begin(ring, seqno);
+ trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1265,7 +1257,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
@@ -1297,7 +1289,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
}
now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(ring, seqno);
+ trace_i915_gem_request_wait_end(req);
if (!irq_test_in_progress)
ring->irq_put(ring);
@@ -1324,32 +1316,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible = dev_priv->mm.interruptible;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ bool interruptible;
unsigned reset_counter;
int ret;
+ BUG_ON(req == NULL);
+
+ dev = req->ring->dev;
+ dev_priv = dev->dev_private;
+ interruptible = dev_priv->mm.interruptible;
+
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(seqno == 0);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
- NULL, NULL);
+ i915_gem_request_reference(req);
+ ret = __i915_wait_request(req, reset_counter,
+ interruptible, NULL, NULL);
+ i915_gem_request_unreference(req);
+ return ret;
}
static int
@@ -1361,11 +1361,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*
- * Note that the last_write_seqno is always the earlier of
- * the two (read/write) seqno, so if we haved successfully waited,
+ * Note that the last_write_req is always the earlier of
+ * the two (read/write) requests, so if we have successfully waited,
* we know we have passed the last write.
*/
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_write_req, NULL);
return 0;
}
@@ -1378,15 +1378,14 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_engine_cs *ring = obj->ring;
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(req);
if (ret)
return ret;
@@ -1401,33 +1400,33 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_file_private *file_priv,
bool readonly)
{
+ struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
- u32 seqno;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
- file_priv);
+ ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
if (ret)
return ret;
@@ -1481,18 +1480,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
- if (read_domains & I915_GEM_DOMAIN_GTT) {
+ if (read_domains & I915_GEM_DOMAIN_GTT)
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
-
- /* Silently promote "you're not bound, there was nothing to do"
- * to success, since the client was just asking us to
- * make sure everything was done.
- */
- if (ret == -EINVAL)
- ret = 0;
- } else {
+ else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
- }
unref:
drm_gem_object_unreference(&obj->base);
@@ -1524,7 +1515,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
/* Pinned buffers may be scanout, so flush the cache */
if (obj->pin_display)
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1557,6 +1548,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
+ if (args->flags & ~(I915_MMAP_WC))
+ return -EINVAL;
+
+ if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+ return -ENODEV;
+
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
@@ -1572,6 +1569,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
+ if (args->flags & I915_MMAP_WC) {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ if (vma)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ addr = -ENOMEM;
+ up_write(&mm->mmap_sem);
+ }
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
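For context, a hedged userspace-side sketch of the new mmap flag (assuming the libdrm drmIoctl() wrapper and an i915_drm.h carrying I915_MMAP_WC; field names follow struct drm_i915_gem_mmap):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Request a write-combined CPU mapping of a GEM object. */
static void *example_mmap_wc(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.size = size,
		.flags = I915_MMAP_WC,	/* -ENODEV if the CPU lacks PAT */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}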
@@ -2256,14 +2266,18 @@ static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req;
+ struct intel_engine_cs *old_ring;
BUG_ON(ring == NULL);
- if (obj->ring != ring && obj->last_write_seqno) {
- /* Keep the seqno relative to the current ring */
- obj->last_write_seqno = seqno;
+
+ req = intel_ring_get_request(ring);
+ old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ if (old_ring != ring && obj->last_write_req) {
+ /* Keep the request relative to the current ring */
+ i915_gem_request_assign(&obj->last_write_req, req);
}
- obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
@@ -2273,7 +2287,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_read_seqno = seqno;
+ i915_gem_request_assign(&obj->last_read_req, req);
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2286,29 +2300,25 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *vm;
struct i915_vma *vma;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- vma = i915_gem_obj_to_vma(obj, vm);
- if (vma && !list_empty(&vma->mm_list))
- list_move_tail(&vma->mm_list, &vm->inactive_list);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
- obj->ring = NULL;
- obj->last_read_seqno = 0;
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_read_req, NULL);
+ i915_gem_request_assign(&obj->last_write_req, NULL);
obj->base.write_domain = 0;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
obj->active = 0;
drm_gem_object_unreference(&obj->base);
@@ -2319,13 +2329,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring = obj->ring;
-
- if (ring == NULL)
+ if (obj->last_read_req == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_read_seqno))
+ if (i915_gem_request_completed(obj->last_read_req, true))
i915_gem_object_move_to_inactive(obj);
}
@@ -2401,22 +2408,20 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *obj,
- u32 *out_seqno)
+ struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
- u32 request_ring_position, request_start;
+ u32 request_start;
int ret;
- request = ring->preallocated_lazy_request;
+ request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
+ ringbuf = request->ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
@@ -2429,7 +2434,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* what.
*/
if (i915.enable_execlists) {
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
if (ret)
return ret;
} else {
@@ -2443,10 +2448,10 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ringbuf);
+ request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists) {
- ret = ring->emit_request(ringbuf);
+ ret = ring->emit_request(ringbuf, request);
if (ret)
return ret;
} else {
@@ -2455,10 +2460,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
return ret;
}
- request->seqno = intel_ring_get_seqno(ring);
- request->ring = ring;
request->head = request_start;
- request->tail = request_ring_position;
+ request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2491,9 +2494,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
spin_unlock(&file_priv->mm.lock);
}
- trace_i915_gem_request_add(ring, request->seqno);
- ring->outstanding_lazy_seqno = 0;
- ring->preallocated_lazy_request = NULL;
+ trace_i915_gem_request_add(request);
+ ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
@@ -2503,8 +2505,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
- if (out_seqno)
- *out_seqno = request->seqno;
return 0;
}
@@ -2532,7 +2532,8 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (ctx->hang_stats.banned)
return true;
- if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ if (ctx->hang_stats.ban_period_seconds &&
+ elapsed <= ctx->hang_stats.ban_period_seconds) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
@@ -2568,33 +2569,39 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
- struct intel_context *ctx = request->ctx;
-
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+ struct drm_i915_gem_request *req = container_of(req_ref,
+ typeof(*req), ref);
+ struct intel_context *ctx = req->ctx;
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *ring = req->ring;
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
+
i915_gem_context_unreference(ctx);
}
- kfree(request);
+
+ kfree(req);
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
- u32 completed_seqno;
-
- completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_gem_request_completed(request, false))
continue;
return request;
@@ -2641,13 +2648,17 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
+ struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
+
+ if (submit_req->ctx != ring->default_context)
+ intel_lr_context_unpin(ring, submit_req->ctx);
+
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
@@ -2669,10 +2680,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- /* These may not have been flush before the reset, do so now */
- kfree(ring->preallocated_lazy_request);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ /* This may not have been flushed before the reset, so clean it now */
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2724,15 +2733,11 @@ void i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- uint32_t seqno;
-
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring, true);
-
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
@@ -2744,7 +2749,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
@@ -2759,10 +2764,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_request,
list);
- if (!i915_seqno_passed(seqno, request->seqno))
+ if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(ring, request->seqno);
+ trace_i915_gem_request_retire(request);
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
@@ -2780,15 +2785,15 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->tail;
+ ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
- if (unlikely(ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ if (unlikely(ring->trace_irq_req &&
+ i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
- ring->trace_irq_seqno = 0;
+ i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
WARN_ON(i915_verify_lists(ring->dev));
@@ -2860,14 +2865,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
+ struct intel_engine_cs *ring;
int ret;
if (obj->active) {
- ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
}
return 0;
@@ -2901,9 +2909,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *req;
unsigned reset_counter;
- u32 seqno = 0;
int ret = 0;
if (args->flags != 0)
@@ -2924,13 +2931,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret)
goto out;
- if (obj->active) {
- seqno = obj->last_read_seqno;
- ring = obj->ring;
- }
+ if (!obj->active || !obj->last_read_req)
+ goto out;
- if (seqno == 0)
- goto out;
+ req = obj->last_read_req;
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2942,10 +2946,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- return __i915_wait_seqno(ring, seqno, reset_counter, true,
- &args->timeout_ns, file->driver_priv);
+ ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+ file->driver_priv);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
out:
drm_gem_object_unreference(&obj->base);
@@ -2969,10 +2978,12 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from = obj->ring;
+ struct intel_engine_cs *from;
u32 seqno;
int ret, idx;
+ from = i915_gem_request_get_ring(obj->last_read_req);
+
if (from == NULL || to == from)
return 0;
@@ -2981,24 +2992,25 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_read_seqno;
+ seqno = i915_gem_request_get_seqno(obj->last_read_req);
/* Optimization: Avoid semaphore sync when we are sure we already
* waited for an object with higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
- ret = i915_gem_check_olr(obj->ring, seqno);
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- trace_i915_gem_ring_sync_to(from, to, seqno);
+ trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
- /* We use last_read_seqno because sync_to()
+ /* We use last_read_req because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
- from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+ from->semaphore.sync_seqno[idx] =
+ i915_gem_request_get_seqno(obj->last_read_req);
return ret;
}
@@ -3054,10 +3066,8 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/
- /* Throw away the active reference before moving to the unbound list */
- i915_gem_object_retire(obj);
-
- if (i915_is_ggtt(vma->vm)) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
@@ -3071,8 +3081,15 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
list_del_init(&vma->mm_list);
- if (i915_is_ggtt(vma->vm))
- obj->map_and_fenceable = false;
+ if (i915_is_ggtt(vma->vm)) {
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ obj->map_and_fenceable = false;
+ } else if (vma->ggtt_view.pages) {
+ sg_free_table(vma->ggtt_view.pages);
+ kfree(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ }
+ }
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -3080,6 +3097,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
+ /* Throw away the active reference before
+ * moving to the unbound list. */
+ i915_gem_object_retire(obj);
+
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
@@ -3148,6 +3169,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
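+	/* Worked example of the rounding above (illustrative numbers only):
+	 * Y-tiled with stride = 512 bytes gives row_size = 512 * 32 = 16384;
+	 * a 40960-byte GTT area is then trimmed to (40960 / 16384) * 16384
+	 * = 32768 bytes, i.e. the fence covers whole tile rows only. */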
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -3263,17 +3291,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- case 5:
- case 4: i965_write_fence_reg(dev, reg, obj); break;
- case 3: i915_write_fence_reg(dev, reg, obj); break;
- case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: BUG();
- }
+ if (IS_GEN2(dev))
+ i830_write_fence_reg(dev, reg, obj);
+ else if (IS_GEN3(dev))
+ i915_write_fence_reg(dev, reg, obj);
+ else if (INTEL_INFO(dev)->gen >= 4)
+ i965_write_fence_reg(dev, reg, obj);
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
@@ -3312,12 +3335,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
- if (obj->last_fenced_seqno) {
- int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (obj->last_fenced_req) {
+ int ret = i915_wait_request(obj->last_fenced_req);
if (ret)
return ret;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
}
return 0;
@@ -3490,7 +3513,8 @@ static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- uint64_t flags)
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3540,7 +3564,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
if (IS_ERR(vma))
goto err_unpin;
@@ -3570,15 +3594,19 @@ search_free:
if (ret)
goto err_remove_node;
+ trace_i915_vma_bind(vma, flags);
+ ret = i915_vma_bind(vma, obj->cache_level,
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+ if (ret)
+ goto err_finish_gtt;
+
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- trace_i915_vma_bind(vma, flags);
- vma->bind_vma(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
-
return vma;
+err_finish_gtt:
+ i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
@@ -3615,11 +3643,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
+ obj->cache_dirty = true;
return false;
+ }
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages);
+ obj->cache_dirty = false;
return true;
}
@@ -3655,15 +3686,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, force))
+ if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
@@ -3685,15 +3715,10 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
+ struct i915_vma *vma;
int ret;
- /* Not valid to be called on unbound objects. */
- if (vma == NULL)
- return -EINVAL;
-
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
@@ -3702,7 +3727,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return ret;
i915_gem_object_retire(obj);
- i915_gem_object_flush_cpu_write_domain(obj, false);
+
+ /* Flush and acquire obj->pages so that direct access to memory
+ * stays coherent with previous cached writes through shmemfs and
+ * our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3733,9 +3771,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- if (i915_gem_object_is_inactive(obj))
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
- &dev_priv->gtt.base.inactive_list);
+ &to_i915(obj->base.dev)->gtt.base.inactive_list);
return 0;
}
@@ -3781,36 +3820,23 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- vma->bind_vma(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ if (drm_mm_node_allocated(&vma->node)) {
+ ret = i915_vma_bind(vma, cache_level,
+ vma->bound & GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
- if (cpu_write_needs_clflush(obj)) {
- u32 old_read_domains, old_write_domain;
-
- /* If we're coming from LLC cached, then we haven't
- * actually been tracking whether the data is in the
- * CPU cache or not, since we only allow one bit set
- * in obj->write_domain and have been skipping the clflushes.
- * Just set it to the CPU cache for now.
- */
- i915_gem_object_retire(obj);
- WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
+ if (obj->cache_dirty &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
+ if (i915_gem_clflush_object(obj, true))
+ i915_gem_chipset_flush(obj->base.dev);
}
return 0;
@@ -3902,18 +3928,14 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
if (!vma)
return false;
- /* There are 3 sources that pin objects:
+ /* There are 2 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
- * 3. The user.
*
* We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path. The user
- * can only increment pin_count once, and so if after
- * subtracting the potential reference by the user, any pin_count
- * remains, it must be due to another use by the display engine.
+ * are only called outside of the reservation path.
*/
- return vma->pin_count - !!obj->user_pin_count;
+ return vma->pin_count;
}
/*
@@ -3930,7 +3952,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
bool was_pin_display;
int ret;
- if (pipelined != obj->ring) {
+ if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
@@ -3964,7 +3986,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret)
goto err_unpin_display;
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -4082,10 +4104,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
- struct drm_i915_gem_request *request;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
- u32 seqno = 0;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4101,19 +4121,24 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ring = request->ring;
- seqno = request->seqno;
+ target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ if (target)
+ i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
- if (seqno == 0)
+ if (target == NULL)
return 0;
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(target);
+ mutex_unlock(&dev->struct_mutex);
+
return ret;
}
@@ -4137,10 +4162,11 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
}
int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
+i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4156,7 +4182,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
@@ -4166,7 +4192,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset(obj, vm), alignment,
+ i915_gem_obj_offset_view(obj, vm, view->type),
+ alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
@@ -4179,13 +4206,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ flags, view);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
if ((bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
@@ -4257,102 +4288,6 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_DEBUG("Attempting to pin a purgeable buffer\n");
- ret = -EFAULT;
- goto out;
- }
-
- if (obj->pin_filp != NULL && obj->pin_filp != file) {
- DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
-
- if (obj->user_pin_count == ULONG_MAX) {
- ret = -EBUSY;
- goto out;
- }
-
- if (obj->user_pin_count == 0) {
- ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
- if (ret)
- goto out;
- }
-
- obj->user_pin_count++;
- obj->pin_filp = file;
-
- args->offset = i915_gem_obj_ggtt_offset(obj);
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->pin_filp != file) {
- DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
- obj->user_pin_count--;
- if (obj->user_pin_count == 0) {
- obj->pin_filp = NULL;
- i915_gem_object_ggtt_unpin(obj);
- }
-
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -4378,9 +4313,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
- if (obj->ring) {
+ if (obj->last_read_req) {
+ struct intel_engine_cs *ring;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy |= intel_ring_flag(obj->ring) << 16;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+ args->busy |= intel_ring_flag(ring) << 16;
}
drm_gem_object_unreference(&obj->base);
@@ -4460,6 +4397,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
+ INIT_LIST_HEAD(&obj->batch_pool_list);
obj->ops = ops;
@@ -4615,12 +4553,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view->type)
return vma;
return NULL;
@@ -4676,10 +4615,15 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work);
+ /* Assert that we successfully flushed all the work and
+ * reset the GPU back to its idle, low power state.
+ */
+ WARN_ON(dev_priv->mm.busy);
+
return 0;
err:
@@ -4791,14 +4735,6 @@ int i915_gem_init_rings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -4851,6 +4787,7 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4877,32 +4814,35 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_init_swizzling(dev);
- ret = dev_priv->gt.init_rings(dev);
- if (ret)
- return ret;
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(dev);
+
+ for_each_ring(ring, dev_priv, i) {
+ ret = ring->init_hw(ring);
+ if (ret)
+ return ret;
+ }
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
@@ -4939,18 +4879,18 @@ int i915_gem_init(struct drm_device *dev)
}
ret = i915_gem_init_userptr(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
+
+ ret = dev_priv->gt.init_rings(dev);
+ if (ret)
+ goto out_unlock;
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
@@ -4962,6 +4902,8 @@ int i915_gem_init(struct drm_device *dev)
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
+
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -5062,6 +5004,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+
mutex_init(&dev_priv->fb_tracking.lock);
}
@@ -5222,8 +5166,9 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
}
/* All the new VM stuff */
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
@@ -5231,7 +5176,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view)
return vma->node.start;
}
@@ -5240,13 +5185,16 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
return -1;
}
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ if (vma->vm == vm &&
+ vma->ggtt_view.type == view &&
+ drm_mm_node_allocated(&vma->node))
return true;
return false;
@@ -5378,11 +5326,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (vma->vm != i915_obj_to_ggtt(obj))
- return NULL;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ return vma;
- return vma;
+ return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
index 000000000000..c690170a1c4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user provided batch buffer contents to a kernel owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @dev: the drm device
+ * @pool: the batch buffer pool
+ */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool)
+{
+ pool->dev = dev;
+ INIT_LIST_HEAD(&pool->cache_list);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ while (!list_empty(&pool->cache_list)) {
+ struct drm_i915_gem_object *obj =
+ list_first_entry(&pool->cache_list,
+ struct drm_i915_gem_object,
+ batch_pool_list);
+
+ WARN_ON(obj->active);
+
+ list_del_init(&obj->batch_pool_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+}
+
+/**
+ * i915_gem_batch_pool_get() - select a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Finds or allocates a batch buffer in the pool with at least the requested
+ * size. The caller is responsible for any domain, active/inactive, or
+ * purgeability management for the returned buffer.
+ *
+ * Note: Callers must hold the struct_mutex
+ *
+ * Return: the selected batch buffer object
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_i915_gem_object *tmp, *next;
+
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ list_for_each_entry_safe(tmp, next,
+ &pool->cache_list, batch_pool_list) {
+
+ if (tmp->active)
+ continue;
+
+ /* While we're looping, do some clean up */
+ if (tmp->madv == __I915_MADV_PURGED) {
+ list_del(&tmp->batch_pool_list);
+ drm_gem_object_unreference(&tmp->base);
+ continue;
+ }
+
+ /*
+ * Select a buffer that is at least as big as needed
+ * but not 'too much' bigger. A better way to do this
+ * might be to bucket the pool objects based on size.
+ */
+ if (tmp->base.size >= size &&
+ tmp->base.size <= (2 * size)) {
+ obj = tmp;
+ break;
+ }
+ }
+
+ if (!obj) {
+ obj = i915_gem_alloc_object(pool->dev, size);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ list_add_tail(&obj->batch_pool_list, &pool->cache_list);
+ } else {
+ /* Keep list in LRU order */
+ list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+ }
+
+ obj->madv = I915_MADV_WILLNEED;
+
+ return obj;
+}
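+
+/*
+ * A minimal usage sketch, assuming the caller holds struct_mutex as all
+ * pool entry points require. The helper name is hypothetical and only
+ * illustrates the find/use/release cycle; it is not part of the driver.
+ */
+static inline struct drm_i915_gem_object *
+example_batch_pool_scratch(struct i915_gem_batch_pool *pool, size_t len)
+{
+ struct drm_i915_gem_object *obj = i915_gem_batch_pool_get(pool, len);
+
+ if (IS_ERR(obj))
+ return obj;
+
+ /* ... copy commands into obj and submit it ... */
+
+ /* Mark purgeable again so the shrinker can reclaim the pages. */
+ obj->madv = I915_MADV_DONTNEED;
+ return obj;
+}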
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d011ec82ef1e..8603bf48d3ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -222,6 +222,8 @@ __create_hw_context(struct drm_device *dev,
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+ ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+
return ctx;
err_out:
@@ -408,14 +410,25 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
BUG_ON(!dev_priv->ring[RCS].default_context);
- if (i915.enable_execlists)
- return 0;
+ if (i915.enable_execlists) {
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring,
+ ring->default_context);
+ if (ret) {
+ DRM_ERROR("ring init context: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
- for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, ring->default_context);
- if (ret)
- return ret;
- }
+ } else
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -611,9 +624,14 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
- if (!(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
- GLOBAL_BIND);
+ if (!(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma,
+ to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
+ /* This shouldn't ever fail. */
+ if (WARN_ONCE(ret, "GGTT context bind failed!"))
+ goto unpin_out;
+ }
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -651,7 +669,8 @@ static int do_switch(struct intel_engine_cs *ring,
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
- BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+ BUG_ON(i915_gem_request_get_ring(
+ from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -671,10 +690,6 @@ done:
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
-
- ret = i915_gem_render_state_init(ring);
- if (ret)
- DRM_ERROR("init render state: %d\n", ret);
}
return 0;
@@ -779,3 +794,72 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct intel_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ args->size = 0;
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ args->value = ctx->hang_stats.ban_period_seconds;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct intel_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value < ctx->hang_stats.ban_period_seconds &&
+ !capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ctx->hang_stats.ban_period_seconds = args->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
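+
+/*
+ * A minimal userspace sketch of the new context param interface. This
+ * assumes the uapi wiring from the same series (struct
+ * drm_i915_gem_context_param and the GETPARAM/SETPARAM ioctl numbers in
+ * i915_drm.h) plus libdrm's drmIoctl():
+ *
+ *    struct drm_i915_gem_context_param p = {
+ *        .ctx_id = ctx_id,
+ *        .param = I915_CONTEXT_PARAM_BAN_PERIOD,
+ *    };
+ *
+ *    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
+ *        printf("ban period: %llu s\n", (unsigned long long)p.value);
+ */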
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 886ff2ee7a28..e3a49d94da3a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -50,11 +50,12 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
- * @size: size of the desired free space
+ * @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
- * @mappable: whether the free space must be mappable
- * @nonblocking: whether evicting active objects is allowed or not
+ * @start: start (inclusive) of the range from which to evict objects
+ * @end: end (exclusive) of the range from which to evict objects
+ * @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
@@ -196,7 +197,6 @@ found:
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
- *
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
@@ -214,6 +214,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
+ WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
trace_i915_gem_evict_vm(vm);
if (do_idle) {
@@ -222,6 +223,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return ret;
i915_gem_retire_requests(vm->dev);
+
+ WARN_ON(!list_empty(&vm->active_list));
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 11738316394a..b773368fc62c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -223,7 +224,12 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+ if (entry->flags & __EXEC_OBJECT_PURGEABLE)
+ obj->madv = I915_MADV_DONTNEED;
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
+ __EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_PURGEABLE);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -357,9 +363,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !(target_vma->bound & GLOBAL_BIND)))
- target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
- GLOBAL_BIND);
+ !(target_vma->bound & GLOBAL_BIND))) {
+ ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
+ if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+ return ret;
+ }
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -943,7 +952,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req = intel_ring_get_request(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -960,7 +969,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ i915_gem_request_assign(&obj->last_write_req, req);
intel_fb_obj_invalidate(obj, ring);
@@ -968,7 +977,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- obj->last_fenced_seqno = seqno;
+ i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@ -990,7 +999,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj, NULL);
+ (void)__i915_add_request(ring, file, obj);
}
static int
@@ -1060,6 +1069,67 @@ i915_emit_box(struct intel_engine_cs *ring,
return 0;
}
+static struct drm_i915_gem_object *
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+ struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ u32 batch_len,
+ bool is_master,
+ u32 *flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+ struct drm_i915_gem_object *shadow_batch_obj;
+ bool need_reloc = false;
+ int ret;
+
+ shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+ batch_obj->base.size);
+ if (IS_ERR(shadow_batch_obj))
+ return shadow_batch_obj;
+
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ shadow_batch_obj,
+ batch_start_offset,
+ batch_len,
+ is_master);
+ if (ret) {
+ if (ret == -EACCES)
+ return batch_obj;
+ } else {
+ struct i915_vma *vma;
+
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ shadow_batch_obj->base.pending_read_domains =
+ batch_obj->base.pending_read_domains;
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ *flags |= I915_DISPATCH_SECURE;
+ }
+
+ return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1208,7 +1278,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret;
}
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1277,6 +1347,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
+ struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
@@ -1309,13 +1380,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
+ ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ DRM_DEBUG("execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
+ return -EINVAL;
+ }
+
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
- ring_id = gen8_dispatch_bsd_ring(dev, file);
- ring = &dev_priv->ring[ring_id];
+
+ switch (args->flags & I915_EXEC_BSD_MASK) {
+ case I915_EXEC_BSD_DEFAULT:
+ ring_id = gen8_dispatch_bsd_ring(dev, file);
+ ring = &dev_priv->ring[ring_id];
+ break;
+ case I915_EXEC_BSD_RING1:
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BSD_RING2:
+ ring = &dev_priv->ring[VCS2];
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
+ (int)(args->flags & I915_EXEC_BSD_MASK));
+ return -EINVAL;
+ }
} else
ring = &dev_priv->ring[VCS];
} else
@@ -1393,28 +1486,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EINVAL;
goto err;
}
- batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
if (i915_needs_cmd_parser(ring)) {
- ret = i915_parse_cmds(ring,
- batch_obj,
- args->batch_start_offset,
- file->is_master);
- if (ret) {
- if (ret != -EACCES)
- goto err;
- } else {
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ batch_obj = i915_gem_execbuffer_parse(ring,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master,
+ &flags);
+ if (IS_ERR(batch_obj)) {
+ ret = PTR_ERR(batch_obj);
+ goto err;
}
}
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 171f6eafdeee..746f77fb57a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,68 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view is referred to as the normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view
+ * differ from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _view suffix. They take the struct i915_ggtt_view parameter
+ * encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old
+ * core GEM API functions, the ones not taking a view parameter, operate on,
+ * or with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * The core API is designed to have copy semantics, which means that a passed-in
+ * struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
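+
+/*
+ * A hypothetical illustration of steps 1-3 above (the I915_GGTT_VIEW_EXAMPLE
+ * name and the build_example_pages() helper are made up, not part of this
+ * patch):
+ *
+ *    enum i915_ggtt_view_type {
+ *        I915_GGTT_VIEW_NORMAL = 0,
+ *        I915_GGTT_VIEW_EXAMPLE,
+ *    };
+ *
+ * and, in i915_get_vma_pages(), a branch building the sg table:
+ *
+ *    else if (vma->ggtt_view.type == I915_GGTT_VIEW_EXAMPLE)
+ *        vma->ggtt_view.pages = build_example_pages(vma->obj);
+ */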
+
+const struct i915_ggtt_view i915_ggtt_view_normal;
+
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -40,8 +102,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- if (IS_GEN8(dev))
- has_full_ppgtt = false; /* XXX why? */
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
@@ -72,7 +132,10 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ return 2;
+ else
+ return has_aliasing_ppgtt ? 1 : 0;
}
@@ -132,7 +195,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -156,7 +219,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -1102,10 +1165,8 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
- return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
- BUG();
+ return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
@@ -1146,7 +1207,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
@@ -1341,9 +1402,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
+ *
+ * Bind is not expected to fail since this is only called on
+ * resume, and the assumption is that all requirements already exist.
*/
vma->bound &= ~GLOBAL_BIND;
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
}
@@ -1538,7 +1602,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
BUG_ON(!i915_is_ggtt(vma->vm));
- intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
vma->bound = GLOBAL_BIND;
}
@@ -1588,7 +1652,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1600,7 +1664,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
- vma->obj->pages,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -2165,7 +2229,8 @@ int i915_gem_gtt_init(struct drm_device *dev)
}
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
@@ -2176,12 +2241,9 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
+ vma->ggtt_view = *view;
- switch (INTEL_INFO(vm->dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
+ if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
@@ -2189,39 +2251,73 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
- break;
- case 5:
- case 4:
- case 3:
- case 2:
+ } else {
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
- break;
- default:
- BUG();
}
- /* Keep GGTT vmas first to make debug easier */
- if (i915_is_ggtt(vm))
- list_add(&vma->vma_link, &obj->vma_list);
- else {
- list_add_tail(&vma->vma_link, &obj->vma_list);
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+ if (!i915_is_ggtt(vm))
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
return vma;
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, vm);
+ vma = __i915_gem_vma_create(obj, vm, view);
return vma;
}
+
+static inline int i915_get_vma_pages(struct i915_vma *vma)
+{
+ if (vma->ggtt_view.pages)
+ return 0;
+
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ vma->ggtt_view.pages = vma->obj->pages;
+ else
+ WARN_ONCE(1, "GGTT view %u not implemented!\n",
+ vma->ggtt_view.type);
+
+ if (!vma->ggtt_view.pages) {
+ DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ vma->ggtt_view.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ int ret = i915_get_vma_pages(vma);
+
+ if (ret)
+ return ret;
+
+ vma->bind_vma(vma, cache_level, flags);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index beaf4bcfdac8..e377c7d27bd4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -109,7 +109,20 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+};
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+
+ struct sg_table *pages;
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+
enum i915_cache_level;
+
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
@@ -129,6 +142,15 @@ struct i915_vma {
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also the view
+ * assumed by GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -146,11 +168,10 @@ struct i915_vma {
/**
* How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
@@ -182,7 +203,7 @@ struct i915_address_space {
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -193,7 +214,7 @@ struct i915_address_space {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
- * last_rendering_seqno is 0 while an object is in this list.
+ * last_read_req is NULL while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 98dcd94acba8..521548a08578 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so.obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4727a4e2c87c..7a24bd1a51f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
obj->fence_dirty =
- obj->last_fenced_seqno ||
+ obj->last_fenced_req ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d182058383a9..1719078c763a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,7 +113,10 @@ restart:
continue;
obj = mo->obj;
- drm_gem_object_reference(&obj->base);
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
spin_unlock(&mn->lock);
cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
obj = container_of(it, struct i915_mmu_object, it)->obj;
- drm_gem_object_reference(&obj->base);
+
+ /* The mmu_object is released late when destroying the
+ * GEM object so it is entirely possible to gain a
+ * reference on an object in the process of being freed
+ * since our serialisation is via the spinlock and not
+ * the struct_mutex - and consequently use it after it
+ * is freed and then double free it.
+ */
+ if (!kref_get_unless_zero(&obj->base.refcount)) {
+ spin_unlock(&mn->lock);
+ serial = 0;
+ continue;
+ }
+
serial = mn->serial;
}
spin_unlock(&mn->lock);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index cdaee6ce05f8..48ddbf44c862 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
+ err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+ err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -679,13 +679,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->pinned = 0;
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->ring ? obj->ring->id : -1;
+ err->ring = obj->last_read_req ?
+ i915_gem_request_get_ring(obj->last_read_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -719,10 +718,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
break;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
- break;
- }
}
return err - first;
@@ -767,32 +764,21 @@ static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
+ (i * 4));
+ } else if (IS_GEN5(dev) || IS_GEN4(dev))
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 +
+ (i * 8));
+ else if (INTEL_INFO(dev)->gen >= 6)
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
+ (i * 8));
}
@@ -926,9 +912,13 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
+ if (IS_GEN6(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE_READ(ring));
+ else if (IS_GEN7(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE(ring));
+ else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
@@ -936,16 +926,6 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
- break;
- case 7:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(ring));
- break;
- case 6:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(ring));
- break;
- }
}
}
@@ -1072,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
+ erq->tail = request->postfix;
}
}
}
@@ -1097,10 +1077,8 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
i++;
- break;
- }
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -1378,26 +1356,15 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
+ if (IS_GEN2(dev) || IS_GEN3(dev))
instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
+ else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- case 8:
- case 9:
+ } else if (INTEL_INFO(dev)->gen >= 7) {
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d0d3dfbe6d2a..4145d95902f5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -45,7 +45,7 @@
* and related files, but that will be described in separate chapters.
*/
-static const u32 hpd_ibx[] = {
+static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
@@ -53,7 +53,7 @@ static const u32 hpd_ibx[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
-static const u32 hpd_cpt[] = {
+static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
@@ -61,7 +61,7 @@ static const u32 hpd_cpt[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
-static const u32 hpd_mask_i915[] = {
+static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
@@ -70,7 +70,7 @@ static const u32 hpd_mask_i915[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
-static const u32 hpd_status_g4x[] = {
+static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -79,7 +79,7 @@ static const u32 hpd_status_g4x[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
-static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
@@ -183,6 +183,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -229,6 +231,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
new_val = dev_priv->pm_irq_mask;
@@ -292,6 +296,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+ /*
+ * SNB and IVB are known to hard hang, and VLV and CHV may hard hang,
+ * on a looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+ return mask;
+}
+
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,8 +325,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
- ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -332,6 +352,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -571,7 +593,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -642,7 +664,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -669,7 +691,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -827,7 +849,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
- &to_intel_crtc(crtc)->config.adjusted_mode);
+ &to_intel_crtc(crtc)->config->base.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -857,7 +879,7 @@ static void i915_digport_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private, dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
- int i, ret;
+ int i;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -881,9 +903,11 @@ static void i915_digport_work_func(struct work_struct *work)
valid = true;
if (valid) {
+ enum irqreturn ret;
+
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == true) {
- /* if we get true fallback to old school hpd */
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
old_bits |= (1 << intel_dig_port->base.hpd_pin);
}
}
@@ -1017,7 +1041,7 @@ static void notify_ring(struct drm_device *dev,
if (!intel_ring_initialized(ring))
return;
- trace_i915_gem_request_complete(ring);
+ trace_i915_gem_request_notify(ring);
wake_up_all(&ring->irq_queue);
}
@@ -1383,14 +1407,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1406,14 +1430,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1440,7 +1464,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -1500,7 +1524,7 @@ static inline enum port get_port_from_pin(enum hpd_pin pin)
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
u32 dig_hotplug_reg,
- const u32 *hpd)
+ const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -2397,19 +2421,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
}
/**
- * i915_error_work_func - do process context error handling work
- * @work: work struct
+ * i915_reset_and_wakeup - do process context error handling work
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_error_work_func(struct work_struct *work)
+static void i915_reset_and_wakeup(struct drm_device *dev)
{
- struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
- work);
- struct drm_i915_private *dev_priv =
- container_of(error, struct drm_i915_private, gpu_error);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2576,10 +2596,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
/**
- * i915_handle_error - handle an error interrupt
+ * i915_handle_error - handle a gpu error
* @dev: drm device
*
- * Do some basic checking of regsiter state at error interrupt time and
+ * Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
@@ -2604,9 +2624,9 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
&dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so that the reset work function
- * i915_error_work_func doesn't deadlock trying to grab various
- * locks. By bumping the reset counter first, the woken
+ * Wake up waiting processes so that the reset function
+ * i915_reset_and_wakeup doesn't deadlock trying to grab
+ * various locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks
@@ -2619,13 +2639,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_error_wake_up(dev_priv, false);
}
- /*
- * Our reset work can grab modeset locks (since it needs to reset the
- * state of outstanding pagelips). Hence it must not be run on our own
- * dev-priv->wq work queue for otherwise the flush_work in the pageflip
- * code will deadlock.
- */
- schedule_work(&dev_priv->gpu_error.work);
+ i915_reset_and_wakeup(dev);
}
/* Called from drm generic code, passed 'crtc' which
@@ -2753,18 +2767,18 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static u32
-ring_last_seqno(struct intel_engine_cs *ring)
+static struct drm_i915_gem_request *
+ring_last_request(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list)->seqno;
+ struct drm_i915_gem_request, list);
}
static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring)
{
return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring_last_seqno(ring)));
+ i915_gem_request_completed(ring_last_request(ring), false));
}
static bool
@@ -2950,7 +2964,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG;
}
-/**
+/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and
* if there are no progress, hangcheck score for that ring is increased.
@@ -2958,10 +2972,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-static void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(struct work_struct *work)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
@@ -2984,7 +3000,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
+ if (ring_idle(ring)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
@@ -3075,17 +3091,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+ struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
if (!i915.enable_hangcheck)
return;
- /* Don't continually defer the hangcheck, but make sure it is active */
- if (timer_pending(timer))
- return;
- mod_timer(timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ /* Don't continually defer the hangcheck, so that it is run at least
+ * once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+ round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_reset(struct drm_device *dev)
@@ -4123,26 +4140,24 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
- if (I915_HAS_HOTPLUG(dev)) {
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= ~HOTPLUG_INT_EN_MASK;
- /* Note HDMI and DP share hotplug bits */
- /* enable bits are the same for all generations */
- for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
- /* Programming the CRT detection parameters tends
- to generate a spurious hotplug event about three
- seconds later. So just do it once.
- */
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
- hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-
- /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+ /* Note HDMI and DP share hotplug bits */
+ /* enable bits are the same for all generations */
+ for_each_intel_encoder(dev, intel_encoder)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ /* Ignore TV since it's buggy */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -4320,7 +4335,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
@@ -4331,9 +4345,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- setup_timer(&dev_priv->gpu_error.hangcheck_timer,
- i915_hangcheck_elapsed,
- (unsigned long) dev);
+ INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work);
@@ -4406,14 +4419,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index c91cb2033cc5..44f2262a5553 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@ struct i915_params i915 __read_mostly = {
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
- .enable_execlists = 0,
+ .enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
@@ -51,6 +51,8 @@ struct i915_params i915 __read_mostly = {
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
+ .verbose_state_checks = 1,
+ .nuclear_pageflip = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -122,7 +124,7 @@ MODULE_PARM_DESC(enable_ppgtt,
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
- "(-1=auto, 0=disabled [default], 1=enabled)");
+ "(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
@@ -173,3 +175,11 @@ module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
+
+module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
+MODULE_PARM_DESC(verbose_state_checks,
+ "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+MODULE_PARM_DESC(nuclear_pageflip,
+ "Force atomic modeset functionality; only planes work for now (default: false).");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 172de3b3433b..33b3d0a24071 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -31,6 +31,8 @@
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
+#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+ (port) == PORT_B ? (b) : (c))
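+/*
+ * Illustrative expansion (not part of the patch): _PORT3() picks one of
+ * three per-port values, e.g.
+ *	_PORT3(PORT_A, 0x100, 0x200, 0x300) == 0x100
+ *	_PORT3(PORT_C, 0x100, 0x200, 0x300) == 0x300
+ * The MIPI DSI block later wraps this with a dummy middle value since
+ * DSI only exists on ports A and C.
+ */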
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@@ -217,6 +219,8 @@
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK 0x7000000
+#define INSTR_26_TO_24_SHIFT 24
/*
* Memory interface instructions used by the kernel
@@ -246,6 +250,7 @@
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -303,8 +308,9 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
-#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
-#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
+#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
+#define MI_USE_GGTT (1 << 22) /* g4x+ */
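+/*
+ * Hedged usage sketch (not from this patch): with the rename, a store
+ * of an immediate dword to a global-GTT address would be emitted as,
+ * assuming the gen8 two-dword address layout:
+ *	intel_ring_emit(ring, MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT);
+ *	intel_ring_emit(ring, lower_32_bits(addr));
+ *	intel_ring_emit(ring, upper_32_bits(addr));
+ *	intel_ring_emit(ring, value);
+ * while 945/g33/965 use MI_MEM_VIRTUAL at the same bit position.
+ */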
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
@@ -470,17 +476,18 @@
*/
#define BCS_SWCTRL 0x22200
-#define HS_INVOCATION_COUNT 0x2300
-#define DS_INVOCATION_COUNT 0x2308
-#define IA_VERTICES_COUNT 0x2310
-#define IA_PRIMITIVES_COUNT 0x2318
-#define VS_INVOCATION_COUNT 0x2320
-#define GS_INVOCATION_COUNT 0x2328
-#define GS_PRIMITIVES_COUNT 0x2330
-#define CL_INVOCATION_COUNT 0x2338
-#define CL_PRIMITIVES_COUNT 0x2340
-#define PS_INVOCATION_COUNT 0x2348
-#define PS_DEPTH_COUNT 0x2350
+#define GPGPU_THREADS_DISPATCHED 0x2290
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
/* There are the 4 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
@@ -598,6 +605,15 @@ enum punit_power_well {
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136
+#define FB_GFX_FREQ_FUSE_MASK 0xff
+#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24
+#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16
+#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8
+
+#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
+#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -1464,6 +1480,17 @@ enum punit_power_well {
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+/* Fuse readout registers for GT */
+#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1509,7 +1536,7 @@ enum punit_power_well {
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIC_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
@@ -2539,6 +2566,42 @@ enum punit_power_well {
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
+/* VLV eDP PSR registers */
+#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
+#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
+#define VLV_EDP_PSR_ENABLE (1<<0)
+#define VLV_EDP_PSR_RESET (1<<1)
+#define VLV_EDP_PSR_MODE_MASK (7<<2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
+#define VLV_EDP_PSR_DBL_FRAME (1<<10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
+#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+
+#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
+#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
+
+#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
+#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_CURR_STATE_MASK 7
+#define VLV_EDP_PSR_DISABLED (0<<0)
+#define VLV_EDP_PSR_INACTIVE (1<<0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
+#define VLV_EDP_PSR_EXIT (5<<0)
+#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
+
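+/*
+ * Illustrative readout of the new state bits (a sketch, mirroring what
+ * the PSR code is expected to do; not part of this hunk):
+ *	u32 val = I915_READ(VLV_PSRSTAT(pipe)) & VLV_EDP_PSR_CURR_STATE_MASK;
+ *	bool active = val == VLV_EDP_PSR_ACTIVE_NORFB_UP ||
+ *		      val == VLV_EDP_PSR_ACTIVE_SF_UPDATE;
+ */
+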
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
@@ -2762,7 +2825,8 @@ enum punit_power_well {
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
@@ -3704,6 +3768,11 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
+#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
+#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
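/*
 * Illustrative encoding (not in the patch): both SKL pulse-count fields
 * store "count - 1", so programming 32 sync pulses becomes
 * DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) == (31 << 5) and
 * DP_AUX_CH_CTL_SYNC_PULSE_SKL(32) == 31.
 */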
/*
@@ -5158,6 +5227,9 @@ enum punit_power_well {
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+#define HIZ_CHICKEN 0x7018
+# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -6005,6 +6077,13 @@ enum punit_power_well {
#define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
+#define GEN9_PG_ENABLE 0xA210
+
+#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
@@ -6119,6 +6198,7 @@ enum punit_power_well {
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN3 0xe184
+#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
@@ -6631,29 +6711,31 @@ enum punit_power_well {
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
-/* VLV MIPI registers */
+/* MIPI DSI registers */
+
+#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
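+/*
+ * Resulting mapping (illustration only): for every register pair below,
+ *	MIPI_PORT_CTRL(PORT_A) -> _MIPIA_PORT_CTRL
+ *	MIPI_PORT_CTRL(PORT_C) -> _MIPIC_PORT_CTRL
+ * PORT_B resolves to the dummy 0, so callers must never pass it in.
+ */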
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
- _MIPIB_PORT_CTRL)
-#define DPI_ENABLE (1 << 31) /* A + B */
+#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_SHIFT 26
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
-#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define DITHERING_ENABLE (1 << 25) /* A + C */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
@@ -6662,10 +6744,10 @@ enum punit_power_well {
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
-#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
-#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
+#define TEARING_EFFECT_SHIFT 2 /* A + C */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
@@ -6677,9 +6759,9 @@ enum punit_power_well {
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
- _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
+ _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
@@ -6689,9 +6771,9 @@ enum punit_power_well {
/* MIPI DSI Controller and D-PHY registers */
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
-#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
- _MIPIB_DEVICE_READY)
+#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
+ _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6700,13 +6782,13 @@ enum punit_power_well {
#define DEVICE_READY (1 << 0)
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
-#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
- _MIPIB_INTR_STAT)
+#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
+ _MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
-#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
- _MIPIB_INTR_EN)
+#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
+ _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6741,9 +6823,9 @@ enum punit_power_well {
#define RXSOT_ERROR (1 << 0)
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
- _MIPIB_DSI_FUNC_PRG)
+#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6765,93 +6847,93 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_MASK (7 << 0)
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
- _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
- _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
- _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
- _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
-#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
- _MIPIB_DPI_RESOLUTION)
+#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
+ _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
- _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
-#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
- _MIPIB_HBP_COUNT)
+#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
+ _MIPIC_HBP_COUNT)
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
-#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
- _MIPIB_HFP_COUNT)
+#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
+ _MIPIC_HFP_COUNT)
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
-#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
- _MIPIB_VBP_COUNT)
+#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
+ _MIPIC_VBP_COUNT)
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
-#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
- _MIPIB_VFP_COUNT)
+#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
+ _MIPIC_VFP_COUNT)
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
-#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
- _MIPIB_DPI_CONTROL)
+#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
+ _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6861,30 +6943,30 @@ enum punit_power_well {
#define SHUTDOWN (1 << 0)
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
-#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
- _MIPIB_DPI_DATA)
+#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
+ _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
-#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
- _MIPIB_INIT_COUNT)
+#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
+ _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
- _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6893,9 +6975,9 @@ enum punit_power_well {
#define VIDEO_MODE_BURST (3 << 0)
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
-#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
- _MIPIB_EOT_DISABLE)
+#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
+ _MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6906,32 +6988,32 @@ enum punit_power_well {
#define EOT_DISABLE (1 << 0)
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
-#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
- _MIPIB_LP_BYTECLK)
+#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
+ _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
-#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
- _MIPIB_LP_GEN_DATA)
+#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
+ _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
-#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
- _MIPIB_HS_GEN_DATA)
+#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
+ _MIPIC_HS_GEN_DATA)
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
-#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
- _MIPIB_LP_GEN_CTRL)
+#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
+ _MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
-#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
- _MIPIB_HS_GEN_CTRL)
+#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
+ _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6943,9 +7025,9 @@ enum punit_power_well {
/* data type values, see include/video/mipi_display.h */
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
-#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
- _MIPIB_GEN_FIFO_STAT)
+#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6962,17 +7044,17 @@ enum punit_power_well {
#define HS_DATA_FIFO_FULL (1 << 0)
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
- _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
-#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
- _MIPIB_DPHY_PARAM)
+#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
+ _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6984,36 +7066,36 @@ enum punit_power_well {
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
-#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
- _MIPIB_DBI_BW_CTRL)
+#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
+ _MIPIC_DBI_BW_CTRL)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
- _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
-#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
- _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
+ _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
-#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
- _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
-#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
- _MIPIB_INTR_EN_REG_1)
+#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
+ _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
@@ -7032,9 +7114,9 @@ enum punit_power_well {
/* MIPI adapter registers */
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
-#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
- _MIPIB_CTRL)
+#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
+ _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -7047,24 +7129,24 @@ enum punit_power_well {
#define RGB_FLIP_TO_BGR (1 << 2)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
-#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
- _MIPIB_DATA_ADDRESS)
+#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
+ _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
-#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
- _MIPIB_DATA_LENGTH)
+#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
+ _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
-#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
- _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
@@ -7072,22 +7154,22 @@ enum punit_power_well {
#define COMMAND_VALID (1 << 0)
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
-#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
- _MIPIB_COMMAND_LENGTH)
+#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
+ _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
-#define MIPI_READ_DATA_RETURN(tc, n) \
- (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) \
+ (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
-#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
- _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
+ _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 26368822a33f..9f19ed38cdc3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on platforms that support FBC */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 4a5af695307e..49f5ade0edb7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,14 +49,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 reg, czcount_30ns;
+ u32 clk_reg, czcount_30ns;
if (IS_CHERRYVIEW(dev))
- reg = CHV_CLK_CTL1;
+ clk_reg = CHV_CLK_CTL1;
else
- reg = VLV_CLK_CTL2;
+ clk_reg = VLV_CLK_CTL2;
- czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!czcount_30ns) {
WARN(!czcount_30ns, "bogus CZ count value");
@@ -116,8 +116,6 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
@@ -126,8 +124,6 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@@ -285,7 +281,7 @@ static struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
@@ -301,9 +297,14 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
+ u32 rpstat = I915_READ(GEN6_RPSTAT1);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ ret = intel_gpu_freq(dev_priv, ret);
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -312,6 +313,27 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
@@ -319,8 +341,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ return snprintf(buf, PAGE_SIZE,
+ "%d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -333,10 +356,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -360,10 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -374,21 +391,21 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- val * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, val));
dev_priv->rps.max_freq_softlimit = val;
- if (dev_priv->rps.cur_freq > val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new max_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new max_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
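/*
 * clamp_t() illustration: the current frequency is re-clamped into the
 * just-updated soft limits before being re-requested,
 *	val = max(min_soft, min(cur, max_soft));
 * which guarantees *_set_rps() runs and refreshes PMINTRMSK and the
 * interrupt limits even when the frequency request itself is unchanged.
 */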
@@ -405,10 +422,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -432,10 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -446,17 +457,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_freq_softlimit = val;
- if (dev_priv->rps.cur_freq < val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new min_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new min_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,6 +475,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
}
+static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
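/*
 * Usage sketch (paths assumed; the card index varies): the new attribute
 * reports the frequency the hardware is actually running at, while
 * gt_cur_freq_mhz keeps reporting the last software request:
 *	cat /sys/class/drm/card0/gt_act_freq_mhz
 *	cat /sys/class/drm/card0/gt_cur_freq_mhz
 */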
@@ -494,19 +506,22 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
if (attr == &dev_attr_gt_RP0_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
else
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x0000ff) >> 0));
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
else
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x00ff00) >> 8));
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
else
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0xff0000) >> 16));
} else {
BUG();
}
@@ -514,6 +529,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
}
static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
@@ -524,6 +540,7 @@ static const struct attribute *gen6_attrs[] = {
};
static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 751d4ad14d62..6058a01b4443 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
- u32 seqno),
- TP_ARGS(from, to, seqno),
+ struct drm_i915_gem_request *req),
+ TP_ARGS(from, to, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
- TP_ARGS(ring, seqno, flags),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+ TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, seqno);
+ i915_trace_irq_get(ring, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,31 +400,36 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
-TRACE_EVENT(i915_gem_request_complete,
+TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
@@ -443,17 +450,23 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@@ -465,20 +478,24 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
- __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->blocking =
+ mutex_is_locked(&ring->dev->struct_mutex);
),
- TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->blocking ? "yes (NB)" : "no")
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
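/*
 * The converted tracepoints tolerate a NULL request; a NULL-safe seqno
 * accessor of roughly this shape is assumed to exist in i915_drv.h:
 *	static inline u32
 *	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 *	{
 *		return req ? req->seqno : 0;
 *	}
 */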
DECLARE_EVENT_CLASS(i915_ring,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
new file mode 100644
index 000000000000..19a9dd5408f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int nconnectors = dev->mode_config.num_connector;
+ enum pipe nuclear_pipe = INVALID_PIPE;
+ int ret;
+ int i;
+ bool not_nuclear = false;
+
+ /*
+ * FIXME: At the moment, we only support "nuclear pageflip" on a
+ * single CRTC. Cross-crtc updates will be added later.
+ */
+ for (i = 0; i < nplanes; i++) {
+ struct intel_plane *plane = to_intel_plane(state->planes[i]);
+ if (!plane)
+ continue;
+
+ if (nuclear_pipe == INVALID_PIPE) {
+ nuclear_pipe = plane->pipe;
+ } else if (nuclear_pipe != plane->pipe) {
+ DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * FIXME: We only handle planes for now; make sure there are no CRTCs
+ * or connectors involved.
+ */
+ state->allow_modeset = false;
+ for (i = 0; i < ncrtcs; i++) {
+ struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
+ if (crtc && crtc->pipe != nuclear_pipe)
+ not_nuclear = true;
+ }
+ for (i = 0; i < nconnectors; i++)
+ if (state->connectors[i] != NULL)
+ not_nuclear = true;
+
+ if (not_nuclear) {
+ DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
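+/*
+ * Wiring sketch (assumed, following the drm core contract; the actual
+ * hookup lives in intel_display.c): these entry points are plugged into
+ * the driver's mode_config funcs, e.g.
+ *	static const struct drm_mode_config_funcs funcs = {
+ *		.atomic_check = intel_atomic_check,
+ *		.atomic_commit = intel_atomic_commit,
+ *	};
+ */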
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+ int i;
+
+ if (async) {
+ DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Point of no return */
+
+ /*
+ * FIXME: The proper sequence here will eventually be:
+ *
+ * drm_atomic_helper_swap_state(dev, state)
+ * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_planes(dev, state);
+ * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_wait_for_vblanks(dev, state);
+ * drm_atomic_helper_cleanup_planes(dev, state);
+ * drm_atomic_state_free(state);
+ *
+ * once we have full atomic modeset. For now, just manually update
+ * plane states to avoid clobbering good states with dummy states
+ * while nuclear pageflipping.
+ */
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->state->state = state;
+ swap(state->plane_states[i], plane->state);
+ plane->state->state = NULL;
+ }
+ drm_atomic_helper_commit_planes(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+
+/**
+ * intel_connector_atomic_get_property - fetch connector property value
+ * @connector: connector to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific connector property.
+ */
+int
+intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ int i;
+
+ /*
+ * TODO: We only have atomic modeset for planes at the moment, so the
+ * crtc/connector code isn't quite ready yet. Until it's ready,
+ * continue to look up all property values in the DRM's shadow copy
+ * in obj->properties->values[].
+ *
+ * When the crtc/connector state work matures, this function should
+ * be updated to read the values out of the state structure instead.
+ */
+ for (i = 0; i < connector->base.properties->count; i++) {
+ if (connector->base.properties->properties[i] == property) {
+ *val = connector->base.properties->values[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (WARN_ON(!intel_crtc->config))
+ return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+
+ return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
+ GFP_KERNEL);
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ * @state: state object to destroy
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
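+
+/*
+ * Illustrative wiring, not part of this patch: the duplicate/destroy hooks
+ * above slot into a crtc funcs table roughly as
+ *
+ *	.atomic_duplicate_state = intel_crtc_duplicate_state,
+ *	.atomic_destroy_state = intel_crtc_destroy_state,
+ *
+ * mirroring the connector-level hooks added to intel_crt_connector_funcs
+ * elsewhere in this diff.
+ */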
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
new file mode 100644
index 000000000000..9e6f727dfd19
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic plane helpers
+ *
+ * The functions here are used by the atomic plane helper functions to
+ * implement legacy plane updates (i.e., drm_plane->update_plane() and
+ * drm_plane->disable_plane()). This allows plane updates to use the
+ * atomic state infrastructure and be carried out as separate
+ * prepare/check/commit/cleanup steps.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
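+
+/*
+ * Illustrative flow, not part of this patch: with the hooks below installed,
+ * the transitional plane helpers translate a legacy
+ * drm_plane->update_plane() call into roughly
+ *
+ *	state = intel_plane_duplicate_state(plane);
+ *	... apply the requested crtc, fb and coordinates to state ...
+ *	.prepare_fb    -> intel_prepare_plane_fb()
+ *	.atomic_check  -> intel_plane_atomic_check()
+ *	.atomic_update -> intel_plane_atomic_update()
+ *	.cleanup_fb    -> intel_cleanup_plane_fb()
+ */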
+
+/**
+ * intel_create_plane_state - create plane state object
+ * @plane: drm plane
+ *
+ * Allocates a fresh plane state for the given plane and sets some of
+ * the state values to sensible initial values.
+ *
+ * Returns: A newly allocated plane state, or NULL on failure
+ */
+struct intel_plane_state *
+intel_create_plane_state(struct drm_plane *plane)
+{
+ struct intel_plane_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->base.plane = plane;
+ state->base.rotation = BIT(DRM_ROTATE_0);
+
+ return state;
+}
+
+/**
+ * intel_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * Intel-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+intel_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+ struct intel_plane_state *intel_state;
+
+ if (WARN_ON(!plane->state))
+ intel_state = intel_create_plane_state(plane);
+ else
+ intel_state = kmemdup(plane->state, sizeof(*intel_state),
+ GFP_KERNEL);
+
+ if (!intel_state)
+ return NULL;
+
+ state = &intel_state->base;
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
+
+ return state;
+}
+
+/**
+ * intel_plane_destroy_state - destroy plane state
+ * @plane: drm plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and Intel-specific) for the
+ * specified plane.
+ */
+void
+intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static int intel_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ /*
+ * Both crtc and plane->crtc could be NULL if we're updating a
+ * property while the plane is disabled. We don't actually have
+ * anything driver-specific we need to test in that case, so
+ * just return success.
+ */
+ if (!crtc)
+ return 0;
+
+ /*
+ * The original src/dest coordinates are stored in state->base, but
+ * we want to keep another copy internal to our driver that we can
+ * clip/modify ourselves.
+ */
+ intel_state->src.x1 = state->src_x;
+ intel_state->src.y1 = state->src_y;
+ intel_state->src.x2 = state->src_x + state->src_w;
+ intel_state->src.y2 = state->src_y + state->src_h;
+ intel_state->dst.x1 = state->crtc_x;
+ intel_state->dst.y1 = state->crtc_y;
+ intel_state->dst.x2 = state->crtc_x + state->crtc_w;
+ intel_state->dst.y2 = state->crtc_y + state->crtc_h;
+
+ /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
+ intel_state->clip.x1 = 0;
+ intel_state->clip.y1 = 0;
+ intel_state->clip.x2 =
+ intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
+ intel_state->clip.y2 =
+ intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
+
+ /*
+ * Disabling a plane is always okay; we just need to update
+ * fb tracking in a special way since cleanup_fb() won't
+ * get called by the plane helpers.
+ */
+ if (state->fb == NULL && plane->state->fb != NULL) {
+ /*
+ * 'prepare' is never called when plane is being disabled, so
+ * we need to handle frontbuffer tracking as a special case
+ */
+ intel_crtc->atomic.disabled_planes |=
+ (1 << drm_plane_index(plane));
+ }
+
+ return intel_plane->check_plane(plane, intel_state);
+}
+
+static void intel_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane_state *intel_state =
+ to_intel_plane_state(plane->state);
+
+ /* Don't disable an already disabled plane */
+ if (!plane->state->fb && !old_state->fb)
+ return;
+
+ intel_plane->commit_plane(plane, intel_state);
+}
+
+const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+ .atomic_check = intel_plane_atomic_check,
+ .atomic_update = intel_plane_atomic_update,
+};
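+
+/*
+ * Illustrative sketch, not part of this patch: a plane opts into the vtable
+ * above at init time with
+ *
+ *	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+ *
+ * after which the transitional plane helpers route legacy updates through
+ * the prepare/check/update/cleanup hooks.
+ */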
+
+/**
+ * intel_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+int
+intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ *val = state->rotation;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns: 0 on success, -EINVAL if the property is unrecognized.
+ */
+int
+intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ state->rotation = val;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 2c7ed5cb29c0..2396cc702d18 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -22,6 +22,9 @@
*/
#include <linux/kernel.h>
+#include <linux/component.h>
+#include <drm/i915_component.h>
+#include "intel_drv.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
@@ -397,7 +400,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -461,3 +464,110 @@ void intel_init_audio(struct drm_device *dev)
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
}
+
+static void i915_audio_component_get_power(struct device *dev)
+{
+ intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+static void i915_audio_component_put_power(struct device *dev)
+{
+ intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+/* Get CDCLK in kHz */
+static int i915_audio_component_get_cdclk_freq(struct device *dev)
+{
+ struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ int ret;
+
+ if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+ return -ENODEV;
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ ret = intel_ddi_get_cdclk_freq(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
+ return ret;
+}
+
+static const struct i915_audio_component_ops i915_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = i915_audio_component_get_power,
+ .put_power = i915_audio_component_put_power,
+ .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
+};
+
+static int i915_audio_component_bind(struct device *i915_dev,
+ struct device *hda_dev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+
+ if (WARN_ON(acomp->ops || acomp->dev))
+ return -EEXIST;
+
+ acomp->ops = &i915_audio_component_ops;
+ acomp->dev = i915_dev;
+
+ return 0;
+}
+
+static void i915_audio_component_unbind(struct device *i915_dev,
+ struct device *hda_dev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+}
+
+static const struct component_ops i915_audio_component_bind_ops = {
+ .bind = i915_audio_component_bind,
+ .unbind = i915_audio_component_unbind,
+};
+
+/**
+ * i915_audio_component_init - initialize and register the audio component
+ * @dev_priv: i915 device instance
+ *
+ * This registers a child component with the component framework, which
+ * binds dynamically to the snd_hda_intel driver's corresponding master
+ * component once the latter is registered. During binding the child
+ * initializes the struct i915_audio_component instance it receives from
+ * the master, and the master can then use the interface defined by that
+ * struct. Either side can break the binding at any point by deregistering
+ * its own component, after which both sides' unbind callbacks are
+ * called.
+ *
+ * We ignore any error during registration and continue with reduced
+ * functionality (i.e. without HDMI audio).
+ */
+void i915_audio_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ if (ret < 0) {
+ DRM_ERROR("failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
+
+ dev_priv->audio_component_registered = true;
+}
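+
+/*
+ * Illustrative sketch, not part of this patch: after binding, the hda side
+ * reaches the i915 callbacks through the shared component struct, e.g.
+ *
+ *	acomp->ops->get_power(acomp->dev);
+ *	cdclk_khz = acomp->ops->get_cdclk_freq(acomp->dev);
+ *	acomp->ops->put_power(acomp->dev);
+ *
+ * with the snd_hda_intel driver registering the corresponding master
+ * component via component_master_add_with_match().
+ */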
+
+/**
+ * i915_audio_component_cleanup - deregister the audio component
+ * @dev_priv: i915 device instance
+ *
+ * Deregisters the audio component, breaking any existing binding to the
+ * corresponding snd_hda_intel driver's master component.
+ */
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->audio_component_registered)
+ return;
+
+ component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ dev_priv->audio_component_registered = false;
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a4bd90f36a03..3f178258d9f9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -664,6 +664,50 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}
+static void
+parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_psr *psr;
+ struct psr_table *psr_table;
+
+ psr = find_section(bdb, BDB_PSR);
+ if (!psr) {
+ DRM_DEBUG_KMS("No PSR BDB found.\n");
+ return;
+ }
+
+ psr_table = &psr->psr_table[panel_type];
+
+ dev_priv->vbt.psr.full_link = psr_table->full_link;
+ dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+	/* Allowed VBT values go from 0 to 15 */
+ dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+ switch (psr_table->lines_to_wait) {
+ case 0:
+ dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+ break;
+ case 1:
+ dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+ break;
+ case 2:
+ dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+ break;
+ case 3:
+ dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+ break;
+ default:
+		DRM_DEBUG_KMS("VBT has unknown PSR lines-to-wait value %u\n",
+			      psr_table->lines_to_wait);
+ break;
+ }
+
+ dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+}
+
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
@@ -1241,6 +1285,7 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
+ parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 7603765c91fc..a6a8710f665f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -80,7 +80,7 @@ struct vbios_data {
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
-#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_PSR 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
@@ -556,6 +556,26 @@ struct bdb_edp {
u16 edp_t3_optimization;
} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
+
+	/* TP wake up time in multiples of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+} __packed;
+
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@@ -798,7 +818,8 @@ struct mipi_config {
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
- u16 rsvd3:12;
+ u16 pixel_overlap:3;
+ u16 rsvd3:9;
u16 rsvd4;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a9af9a4866db..e66e17af0a56 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -110,31 +111,31 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
@@ -157,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa;
if (INTEL_INFO(dev)->gen >= 5)
@@ -303,7 +304,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
@@ -792,6 +793,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_get_property = intel_connector_atomic_get_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6b45cd150d3..f14e8a2a022d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -128,15 +128,15 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
};
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00000018, 0x000000a0 },
- { 0x00004014, 0x00000098 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00004014, 0x0000009B },
{ 0x00006012, 0x00000088 },
- { 0x00008010, 0x00000080 },
- { 0x00000018, 0x00000098 },
+ { 0x00008010, 0x00000087 },
+ { 0x00000018, 0x0000009B },
{ 0x00004014, 0x00000088 },
- { 0x00006012, 0x00000080 },
+ { 0x00006012, 0x00000087 },
{ 0x00000018, 0x00000088 },
- { 0x00004014, 0x00000080 },
+ { 0x00004014, 0x00000087 },
};
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
@@ -328,7 +328,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -338,8 +338,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
- WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
+ WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -357,7 +357,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
- ((intel_crtc->config.fdi_lanes - 1) << 1) |
+ ((intel_crtc->config->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
@@ -732,7 +732,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -768,15 +768,15 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock;
if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -820,21 +820,26 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock * 2;
if (pipe_config->has_pch_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- hsw_ddi_clock_get(encoder, pipe_config);
+ struct drm_device *dev = encoder->base.dev;
+
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void
@@ -904,6 +909,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -918,16 +924,16 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->new_config->dpll_hw_state.wrpll = val;
+ crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
- intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
@@ -1090,6 +1096,7 @@ found:
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -1139,11 +1146,11 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
} else /* eDP */
return true;
- intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
- intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
- intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -1151,7 +1158,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}
/* shared DPLL id 0 is DPLL 1 */
- intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+ crtc_state->ddi_pll_sel = pll->id + 1;
return true;
}
@@ -1163,17 +1170,20 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
* For private DPLLs, compute_config() should do the selection for us. This
* function should be folded into compute_config() eventually.
*/
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
- int clock = intel_crtc->new_config->port_clock;
+ int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
- return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return skl_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
else
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return hsw_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1181,13 +1191,13 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -1212,7 +1222,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1230,7 +1240,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@@ -1239,7 +1249,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -1256,9 +1266,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1269,8 +1279,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) &&
- (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru))
+ (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1288,14 +1298,14 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- if (intel_crtc->config.has_hdmi_sink)
+ if (intel_crtc->config->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
+ temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
@@ -1445,7 +1455,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1455,7 +1465,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1477,7 +1487,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
}
if (IS_SKYLAKE(dev)) {
- uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;
/*
@@ -1492,7 +1502,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
- val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+ val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -1509,8 +1519,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
I915_WRITE(DPLL_CTRL2, val);
} else {
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -1527,8 +1537,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_hdmi->set_infoframes(encoder,
- crtc->config.has_hdmi_sink,
- &crtc->config.adjusted_mode);
+ crtc->config->has_hdmi_sink,
+ &crtc->config->base.adjusted_mode);
}
}
@@ -1600,9 +1610,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
+ intel_edp_drrs_enable(intel_dp);
}
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
intel_audio_codec_enable(intel_encoder);
}
@@ -1617,7 +1628,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
@@ -1625,6 +1636,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_drrs_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
@@ -2022,14 +2034,13 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
}
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
- struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -2041,7 +2052,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -2106,10 +2117,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- if (INTEL_INFO(dev)->gen <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else
- skl_ddi_clock_get(encoder, pipe_config);
+ intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -2119,7 +2127,7 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e2af1383b179..3d220a67f865 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
@@ -76,9 +77,9 @@ static const uint32_t intel_cursor_formats[] = {
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
@@ -95,9 +96,11 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
+static void intel_begin_crtc_commit(struct drm_crtc *crtc);
+static void intel_finish_crtc_commit(struct drm_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -895,7 +898,7 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* properly reconstruct framebuffers.
*/
return intel_crtc->active && crtc->primary->fb &&
- intel_crtc->config.adjusted_mode.crtc_clock;
+ intel_crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -904,7 +907,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- return intel_crtc->config.cpu_transcoder;
+ return intel_crtc->config->cpu_transcoder;
}
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -946,7 +949,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1024,7 +1027,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
reg = DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1040,7 +1043,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
mutex_unlock(&dev_priv->dpio_lock);
cur_state = val & DSI_PLL_VCO_EN;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1052,10 +1055,10 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (crtc->config.shared_dpll < 0)
+ if (crtc->config->shared_dpll < 0)
return NULL;
- return &dev_priv->shared_dplls[crtc->config.shared_dpll];
+ return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}
/* For ILK+ */
@@ -1071,7 +1074,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
return;
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
@@ -1095,7 +1098,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1112,7 +1115,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1135,7 +1138,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
@@ -1148,7 +1151,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_PLL_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1190,7 +1193,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
- WARN(panel_pipe == pipe && locked,
+ I915_STATE_WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
@@ -1206,7 +1209,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1236,7 +1239,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
cur_state = !!(val & PIPECONF_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1251,7 +1254,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
reg = DSPCNTR(plane);
val = I915_READ(reg);
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
}
@@ -1271,7 +1274,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DISPLAY_PLANE_ENABLE,
+ I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
@@ -1283,7 +1286,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
@@ -1299,7 +1302,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
if (INTEL_INFO(dev)->gen >= 9) {
for_each_sprite(pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
- WARN(val & PLANE_CTL_ENABLE,
+ I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
@@ -1307,20 +1310,20 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
- WARN(val & SP_ENABLE,
+ I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
- WARN(val & SPRITE_ENABLE,
+ I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DVS_ENABLE,
+ I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
@@ -1328,7 +1331,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
- if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
drm_crtc_vblank_put(crtc);
}
@@ -1337,12 +1340,12 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
- WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
- WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1355,7 +1358,7 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
- WARN(enabled,
+ I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
@@ -1435,11 +1438,11 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1448,11 +1451,11 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1469,13 +1472,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -1505,7 +1508,7 @@ static void intel_init_dpio(struct drm_device *dev)
}
static void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1544,7 +1547,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
}
static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1599,7 +1602,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1629,7 +1632,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
- crtc->config.dpll_hw_state.dpll_md);
+ crtc->config->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -2034,7 +2037,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
else
assert_pll_enabled(dev_priv, pipe);
else {
- if (crtc->config.has_pch_encoder) {
+ if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
@@ -2068,7 +2071,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
static void intel_disable_pipe(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
u32 val;
@@ -2090,7 +2093,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
@@ -2165,7 +2168,8 @@ static void intel_disable_primary_hw_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- assert_pipe_enabled(dev_priv, intel_crtc->pipe);
+ if (WARN_ON(!intel_crtc->active))
+ return;
if (!intel_crtc->primary_enabled)
return;
@@ -2185,11 +2189,12 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+int
+intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
{
int tile_height;
- tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+ tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
return ALIGN(height, tile_height);
}
@@ -2312,7 +2317,7 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
}
}
-int intel_format_to_fourcc(int format)
+static int i9xx_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
@@ -2333,8 +2338,35 @@ int intel_format_to_fourcc(int format)
}
}
-static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order)
+ return DRM_FORMAT_XBGR2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+}
+
+static bool
+intel_alloc_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
@@ -2349,10 +2381,9 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
if (!obj)
return false;
- if (plane_config->tiled) {
- obj->tiling_mode = I915_TILING_X;
+ obj->tiling_mode = plane_config->tiling;
+ if (obj->tiling_mode == I915_TILING_X)
obj->stride = crtc->base.primary->fb->pitches[0];
- }
mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
mode_cmd.width = crtc->base.primary->fb->width;
@@ -2379,8 +2410,9 @@ out_unref_obj:
return false;
}
-static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_plane_config *plane_config)
+static void
+intel_find_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2468,13 +2500,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
@@ -2529,17 +2561,17 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
intel_crtc->dspaddr_offset = linear_offset;
}
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
I915_WRITE(reg, dspcntr);
@@ -2631,18 +2663,18 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
}
@@ -2728,7 +2760,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
@@ -2741,8 +2773,8 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
- (intel_crtc->config.pipe_src_h - 1) << 16 |
- (intel_crtc->config.pipe_src_w - 1));
+ (intel_crtc->config->pipe_src_h - 1) << 16 |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
@@ -2938,85 +2970,20 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
* then update the pipesrc and pfit state, even on the flip path.
*/
- adjusted_mode = &crtc->config.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
- if (!crtc->config.pch_pfit.enabled &&
+ if (!crtc->config->pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
}
- crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-}
-
-static int
-intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- int ret;
-
- if (intel_crtc_has_pending_flip(crtc)) {
- DRM_ERROR("pipe is still busy with an old pageflip\n");
- return -EBUSY;
- }
-
- /* no fb bound */
- if (!fb) {
- DRM_ERROR("No FB bound\n");
- return 0;
- }
-
- if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
- DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
- plane_name(intel_crtc->plane),
- INTEL_INFO(dev)->num_pipes);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, intel_fb_obj(fb),
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- return ret;
- }
-
- dev_priv->display.update_primary_plane(crtc, fb, x, y);
-
- if (intel_crtc->active)
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
-
- if (old_fb) {
- if (intel_crtc->active && old_fb != fb)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -3063,7 +3030,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
return crtc->base.enabled && crtc->active &&
- crtc->config.has_pch_encoder;
+ crtc->config->has_pch_encoder;
}
static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -3118,7 +3085,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@ -3216,7 +3183,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -3367,7 +3334,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
@@ -3455,7 +3422,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -3639,7 +3606,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -3728,7 +3695,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -3774,7 +3741,7 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config.fdi_lanes > 2)
+ if (intel_crtc->config->fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
@@ -3826,7 +3793,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
+ if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
@@ -3849,7 +3816,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3890,7 +3857,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
@@ -3920,10 +3887,11 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
WARN_ON(pll->active);
}
- crtc->config.shared_dpll = DPLL_ID_PRIVATE;
+ crtc->config->shared_dpll = DPLL_ID_PRIVATE;
}
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
@@ -3949,7 +3917,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->new_config->dpll_hw_state,
+ if (memcmp(&crtc_state->dpll_hw_state,
&pll->new_config->hw_state,
sizeof(pll->new_config->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
@@ -3974,9 +3942,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
found:
if (pll->new_config->crtc_mask == 0)
- pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
+ pll->new_config->hw_state = crtc_state->dpll_hw_state;
- crtc->new_config->shared_dpll = i;
+ crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
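The sharing check above treats a PLL as reusable when the candidate CRTC's requested dpll_hw_state is byte-identical to the state already programmed into the PLL. A small sketch of that memcmp-based matching over a fixed pool (hypothetical types; the real code also tracks crtc_mask and active counts):

#include <stdio.h>
#include <string.h>

struct pll_hw_state { unsigned int dpll, fp0, fp1; };
struct shared_pll { struct pll_hw_state hw_state; int users; };

/* Return the first in-use PLL whose programmed state matches exactly. */
static struct shared_pll *find_matching_pll(struct shared_pll *pool, int n,
					    const struct pll_hw_state *want)
{
	int i;

	for (i = 0; i < n; i++) {
		if (pool[i].users == 0)
			continue;
		if (memcmp(&pool[i].hw_state, want, sizeof(*want)) == 0)
			return &pool[i];
	}
	return NULL;
}

int main(void)
{
	struct shared_pll pool[2] = {
		{ { 0x80000000, 0x30205, 0x30205 }, 1 },
		{ { 0 }, 0 },
	};
	struct pll_hw_state want = { 0x80000000, 0x30205, 0x30205 };

	printf("match: %s\n", find_matching_pll(pool, 2, &want) ? "yes" : "no");
	return 0;
}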
@@ -4073,10 +4041,10 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
- I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
@@ -4086,7 +4054,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -4096,12 +4064,12 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
-static void intel_enable_planes(struct drm_crtc *crtc)
+static void intel_enable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -4115,7 +4083,7 @@ static void intel_enable_planes(struct drm_crtc *crtc)
}
}
-static void intel_disable_planes(struct drm_crtc *crtc)
+static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -4125,7 +4093,7 @@ static void intel_disable_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
- intel_plane_disable(&intel_plane->base);
+ plane->funcs->disable_plane(plane);
}
}
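The type-specific intel_plane_disable() helper is replaced by going through the plane's own function table, plane->funcs->disable_plane, so every plane kind shares one disable path. A sketch of that dispatch pattern (hypothetical types):

#include <stdio.h>

struct plane;
struct plane_funcs { void (*disable_plane)(struct plane *p); };
struct plane { const struct plane_funcs *funcs; const char *name; };

static void sprite_disable(struct plane *p)
{
	printf("disabling %s\n", p->name);
}

static const struct plane_funcs sprite_funcs = {
	.disable_plane = sprite_disable,
};

int main(void)
{
	struct plane sprites[] = {
		{ &sprite_funcs, "sprite A" },
		{ &sprite_funcs, "sprite B" },
	};
	unsigned int i;

	/* The caller no longer needs a type-specific helper; it just */
	/* invokes whatever disable hook the plane registered. */
	for (i = 0; i < sizeof(sprites) / sizeof(sprites[0]); i++)
		sprites[i].funcs->disable_plane(&sprites[i]);
	return 0;
}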
@@ -4134,7 +4102,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
/* We can only enable IPS after we enable a plane and wait for a vblank */
@@ -4167,7 +4135,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
assert_plane_enabled(dev_priv, crtc->plane);
@@ -4216,7 +4184,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
@@ -4259,14 +4227,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_planes(crtc);
+ intel_enable_sprite_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -4288,13 +4256,13 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
+ intel_disable_sprite_planes(crtc);
intel_disable_primary_hw_plane(crtc->primary, crtc);
/*
@@ -4318,17 +4286,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
ironlake_set_pipeconf(crtc);
@@ -4342,7 +4310,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
@@ -4363,18 +4331,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(crtc);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
- assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
-
intel_crtc_enable_planes(crtc);
}
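This hunk and the matching ones below move drm_crtc_vblank_on() ahead of the encoder enable loop, and symmetrically move drm_crtc_vblank_off() after the encoder disable loop, so vblank interrupts are available while encoder hooks run. A compressed sketch of the reordered sequence (stub functions standing in for the driver hooks):

#include <stdio.h>

static void vblank_on(void)       { printf("vblank on\n"); }
static void encoder_enable(void)  { printf("encoder enable\n"); }
static void encoder_disable(void) { printf("encoder disable\n"); }
static void vblank_off(void)      { printf("vblank off\n"); }

int main(void)
{
	/* New enable ordering: vblank interrupts come up first, so an */
	/* encoder enable hook may legitimately wait for a vblank. */
	vblank_on();
	encoder_enable();

	/* Disable mirrors it: encoders go down while vblanks still work. */
	encoder_disable();
	vblank_off();
	return 0;
}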
@@ -4429,19 +4397,19 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
- I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
- intel_crtc->config.pixel_multiplier - 1);
+ if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
+ I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+ intel_crtc->config->pixel_multiplier - 1);
}
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
haswell_set_pipeconf(crtc);
@@ -4455,7 +4423,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
dev_priv->display.fdi_link_train(crtc);
@@ -4480,20 +4448,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
}
- assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
-
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
@@ -4508,7 +4476,7 @@ static void skylake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
@@ -4523,7 +4491,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4544,13 +4512,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
- if (intel_crtc->config.has_pch_encoder)
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4561,7 +4529,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -4591,7 +4559,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4601,27 +4569,27 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (!intel_crtc->active)
return;
intel_crtc_disable_planes(crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
- if (intel_crtc->config.has_pch_encoder)
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -4633,7 +4601,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
@@ -4646,7 +4614,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -4664,9 +4632,9 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_config *pipe_config = &crtc->config;
+ struct intel_crtc_state *pipe_config = crtc->config;
- if (!crtc->config.gmch_pfit.control)
+ if (!pipe_config->gmch_pfit.control)
return;
/*
@@ -4745,8 +4713,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru)
+ if (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4909,7 +4877,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
cmd = 0;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(cdclk);
return;
}
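The bare WARN_ON(1) in the unreachable default: branch is upgraded to MISSING_CASE(cdclk), which also reports which value fell through the switch. A user-space approximation of such a macro (the in-kernel definition may differ; this only shows the idea):

#include <stdio.h>

/* Report the unexpected switch value and where it happened. */
#define MISSING_CASE(x) \
	fprintf(stderr, "Missing switch case (%ld) in %s\n", (long)(x), __func__)

static void set_cdclk(int cdclk)
{
	switch (cdclk) {
	case 400000:
		printf("cdclk 400 MHz\n");
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}
}

int main(void)
{
	set_cdclk(400000);
	set_cdclk(123456);	/* triggers the diagnostic */
	return 0;
}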
@@ -4970,7 +4938,7 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
- intel_crtc->new_config->adjusted_mode.crtc_clock);
+ intel_crtc->new_config->base.adjusted_mode.crtc_clock);
}
return max_pixclk;
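intel_mode_max_pixclk() reduces over all CRTCs that will be enabled, taking the largest adjusted-mode clock; the only change here is that the mode now lives under new_config->base. A sketch of the reduction, using a flat array in place of the driver's CRTC list:

#include <stdio.h>

struct crtc_cfg { int enabled; int crtc_clock; };

static int max_pixclk(const struct crtc_cfg *cfgs, int n)
{
	int i, max = 0;

	for (i = 0; i < n; i++)
		if (cfgs[i].enabled && cfgs[i].crtc_clock > max)
			max = cfgs[i].crtc_clock;
	return max;
}

int main(void)
{
	struct crtc_cfg cfgs[] = {
		{ 1, 148500 }, { 0, 594000 }, { 1, 108000 },
	};

	/* Disabled CRTCs don't constrain the clock: expect 148500. */
	printf("max pixclk: %d kHz\n", max_pixclk(cfgs, 3));
	return 0;
}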
@@ -5038,12 +5006,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc, &intel_crtc->config);
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc, &intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
}
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5067,9 +5035,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc, &intel_crtc->config);
+ chv_enable_pll(intel_crtc, intel_crtc->config);
else
- vlv_enable_pll(intel_crtc, &intel_crtc->config);
+ vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5083,12 +5051,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
intel_crtc_enable_planes(crtc);
/* Underruns don't raise interrupts, so check manually. */
@@ -5100,8 +5068,8 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -5119,7 +5087,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5144,12 +5112,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
intel_crtc_enable_planes(crtc);
/*
@@ -5171,7 +5139,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.gmch_pfit.control)
+ if (!crtc->config->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -5221,12 +5189,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
intel_disable_pipe(intel_crtc);
i9xx_pfit_disable(intel_crtc);
@@ -5251,7 +5219,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -5309,8 +5277,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -5318,14 +5284,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
- if (crtc->primary->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, NULL,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- crtc->primary->fb = NULL;
- }
+ crtc->primary->funcs->disable_plane(crtc->primary);
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -5382,25 +5341,25 @@ static void intel_connector_check_state(struct intel_connector *connector)
if (connector->mst_port)
return;
- WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
- WARN(connector->base.encoder != &encoder->base,
+ I915_STATE_WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
if (encoder) {
- WARN(!encoder->connectors_active,
+ I915_STATE_WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
+ I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
+ if (I915_STATE_WARN_ON(!encoder->base.crtc))
return;
crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
+ I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
}
}
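The connector checks switch from generic WARN to I915_STATE_WARN, a dedicated reporting path for modeset state-checker mismatches introduced elsewhere in this series. One plausible, much simplified shape for such a check, purely illustrative (the real macro wraps WARN and its behaviour is configurable):

#include <stdio.h>

/* Warn (but keep going) when a state-checker condition trips; */
/* return the condition so callers can bail out like WARN_ON does. */
static int state_warn(int cond, const char *msg)
{
	if (cond)
		fprintf(stderr, "state mismatch: %s", msg);
	return cond;
}

int main(void)
{
	int encoder_enabled = 0;

	if (state_warn(!encoder_enabled, "encoder not enabled\n"))
		return 1;
	return 0;
}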
@@ -5438,7 +5397,7 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
}
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
@@ -5479,7 +5438,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return true;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config.fdi_lanes <= 2) {
+ pipe_B_crtc->config->fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
@@ -5497,10 +5456,10 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
@@ -5543,7 +5502,7 @@ retry:
}
static void hsw_compute_ips_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
@@ -5551,11 +5510,11 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
@@ -5800,30 +5759,31 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
}
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
}
}
@@ -5876,7 +5836,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config.cpu_transcoder;
+ enum transcoder transcoder = crtc->config->cpu_transcoder;
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -5888,7 +5848,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily accessed).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -5905,15 +5865,15 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc)
{
- if (crtc->config.has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ if (crtc->config->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
- &crtc->config.dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
+ &crtc->config->dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
@@ -5936,7 +5896,7 @@ static void vlv_update_pll(struct intel_crtc *crtc,
}
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5997,7 +5957,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (crtc->config.has_dp_encoder) {
+ if (pipe_config->has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -6027,7 +5987,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
}
static void chv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
@@ -6040,7 +6000,7 @@ static void chv_update_pll(struct intel_crtc *crtc,
}
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6125,7 +6085,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- struct intel_crtc_config pipe_config = {
+ struct intel_crtc_state pipe_config = {
.pixel_multiplier = 1,
.dpll = *dpll,
};
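vlv_force_pll_on() builds a throwaway intel_crtc_state on the stack with a designated initializer, so only the fields the PLL code reads are set and everything else is zeroed. The same pattern in isolation (hypothetical field set):

#include <stdio.h>

struct dpll { int n, m1, m2, p1, p2; };
struct crtc_state {
	int pixel_multiplier;
	struct dpll dpll;
	int port_clock;	/* left zeroed, like every unnamed field */
};

int main(void)
{
	struct dpll fixed = { 2, 3, 81, 3, 10 };

	/* Designated initializers zero everything not mentioned. */
	struct crtc_state cfg = {
		.pixel_multiplier = 1,
		.dpll = fixed,
	};

	printf("mult=%d n=%d port_clock=%d\n",
	       cfg.pixel_multiplier, cfg.dpll.n, cfg.port_clock);
	return 0;
}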
@@ -6158,6 +6118,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
}
static void i9xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
@@ -6165,9 +6126,9 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
@@ -6180,14 +6141,14 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -6215,7 +6176,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->new_config->sdvo_tv_clock)
+ if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
@@ -6224,25 +6185,26 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
}
}
static void i8xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -6267,7 +6229,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
}
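Both PLL setup paths above encode the P1 post divider as a one-hot bitmask, bit (p1 - 1), shifted into its register field. A worked example of that encoding and its inverse (the shift value here is illustrative, not the real register offset):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P1_POST_DIV_SHIFT 16	/* illustrative shift, not the hw offset */

static uint32_t encode_p1(int p1)
{
	/* p1 = 3 becomes bit 2 of the field: a one-hot mask. */
	return (uint32_t)(1 << (p1 - 1)) << P1_POST_DIV_SHIFT;
}

static int decode_p1(uint32_t dpll)
{
	uint32_t field = (dpll >> P1_POST_DIV_SHIFT) & 0xff;
	int p1 = 1;

	while (field > 1) {
		field >>= 1;
		p1++;
	}
	return p1;
}

int main(void)
{
	int p1;

	for (p1 = 1; p1 <= 8; p1++)
		assert(decode_p1(encode_p1(p1)) == p1);
	printf("p1=3 -> 0x%08x\n", encode_p1(3));
	return 0;
}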
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6275,9 +6237,9 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -6335,12 +6297,12 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config.pipe_src_w - 1) << 16) |
- (intel_crtc->config.pipe_src_h - 1));
+ ((intel_crtc->config->pipe_src_w - 1) << 16) |
+ (intel_crtc->config->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6348,56 +6310,56 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
uint32_t tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->adjusted_mode.crtc_vtotal += 1;
- pipe_config->adjusted_mode.crtc_vblank_end += 1;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->base.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->adjusted_mode.flags;
+ mode->flags = pipe_config->base.adjusted_mode.flags;
- mode->clock = pipe_config->adjusted_mode.crtc_clock;
- mode->flags |= pipe_config->adjusted_mode.flags;
+ mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->flags |= pipe_config->base.adjusted_mode.flags;
}
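intel_get_pipe_timings() above decodes every timing register the same way: the low 16 bits hold one value minus one, the high 16 bits hold the paired value minus one. A worked decode of a packed HTOTAL-style value:

#include <stdint.h>
#include <stdio.h>

/* Mimic HTOTAL: active width minus one in the low half, */
/* total scanline width minus one in the high half. */
static uint32_t pack_htotal(int hdisplay, int htotal)
{
	return (uint32_t)(hdisplay - 1) | ((uint32_t)(htotal - 1) << 16);
}

int main(void)
{
	uint32_t tmp = pack_htotal(1920, 2200);	/* a common 1080p mode */
	int hdisplay = (tmp & 0xffff) + 1;
	int htotal = ((tmp >> 16) & 0xffff) + 1;

	printf("hdisplay=%d htotal=%d (raw 0x%08x)\n", hdisplay, htotal, tmp);
	return 0;
}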
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -6412,17 +6374,17 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config.double_wide)
+ if (intel_crtc->config->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+ if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -6447,7 +6409,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -6456,14 +6418,15 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+ if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6495,7 +6458,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
if (is_dsi)
return 0;
- if (!crtc->new_config->clock_set) {
+ if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6506,7 +6469,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6527,23 +6490,23 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(crtc,
+ i8xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, crtc->new_config);
+ chv_update_pll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(crtc, crtc->new_config);
+ vlv_update_pll(crtc, crtc_state);
} else {
- i9xx_update_pll(crtc,
+ i9xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6552,7 +6515,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6582,7 +6545,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6611,8 +6574,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = clock.dot / 5;
}
-static void i9xx_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6620,27 +6584,30 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
+ fb = &intel_fb->base;
+
val = I915_READ(DSPCNTR(plane));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
if (INTEL_INFO(dev)->gen >= 4) {
- if (plane_config->tiled)
+ if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
@@ -6651,29 +6618,27 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), plane, fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+ crtc->base.primary->fb = fb;
}
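The read-out path above computes how much memory the BIOS-programmed framebuffer occupies: stride times the tiling-aligned height, rounded up to a whole page. A sketch of that arithmetic, assuming 4 KiB pages and an illustrative stand-in for intel_fb_align_height():

#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Illustrative stand-in: X-tiled surfaces are 8-row aligned here. */
static unsigned int align_height(unsigned int height, int tiled)
{
	unsigned int align = tiled ? 8 : 1;

	return (height + align - 1) & ~(align - 1);
}

int main(void)
{
	int tiled = 1;
	unsigned int pitch = 8192;	/* stride already tile-aligned */
	unsigned int height = align_height(1080, tiled);
	unsigned int size = PAGE_ALIGN(pitch * height);

	printf("fb size: %u bytes (%u pages)\n", size, size / PAGE_SIZE);
	return 0;
}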
static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6703,7 +6668,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7183,7 +7148,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -7201,15 +7166,15 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
@@ -7238,7 +7203,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
* consideration.
*/
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
/*
@@ -7262,7 +7227,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -7273,7 +7238,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -7286,15 +7251,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t val;
val = 0;
- if (IS_HASWELL(dev) && intel_crtc->config.dither)
+ if (IS_HASWELL(dev) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -7308,7 +7273,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -7326,7 +7291,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
I915_WRITE(PIPEMISC(pipe), val);
@@ -7334,6 +7299,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
}
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
@@ -7356,7 +7322,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
*/
limit = intel_limit(intel_crtc, refclk);
ret = dev_priv->display.find_dpll(limit, intel_crtc,
- intel_crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7395,6 +7361,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
}
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
@@ -7432,10 +7399,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->new_config->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7448,20 +7415,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->new_config->dpll.p2) {
+ switch (crtc_state->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7484,7 +7451,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
@@ -7498,39 +7466,39 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(&crtc->base, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !crtc->new_config->clock_set) {
+ if (!ok && !crtc_state->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!crtc->new_config->clock_set) {
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ if (!crtc_state->clock_set) {
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (crtc->new_config->has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ if (crtc_state->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(crtc,
+ dpll = ironlake_compute_dpll(crtc, crtc_state,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- crtc->new_config->dpll_hw_state.dpll = dpll;
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
else
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(crtc);
+ pll = intel_get_shared_dpll(crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
@@ -7584,7 +7552,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily read).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -7605,9 +7573,9 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
}
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- if (crtc->config.has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
@@ -7616,14 +7584,14 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
}
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7638,8 +7606,80 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
}
+static void
+skylake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset, stride_mult;
+ int pipe = crtc->pipe;
+ int fourcc, pixel_format;
+ int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ val = I915_READ(PLANE_CTL(pipe, 0));
+ if (val & PLANE_CTL_TILED_MASK)
+ plane_config->tiling = I915_TILING_X;
+
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+ base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+ plane_config->base = base;
+
+ offset = I915_READ(PLANE_OFFSET(pipe, 0));
+
+ val = I915_READ(PLANE_SIZE(pipe, 0));
+ fb->height = ((val >> 16) & 0xfff) + 1;
+ fb->width = ((val >> 0) & 0x1fff) + 1;
+
+ val = I915_READ(PLANE_STRIDE(pipe, 0));
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ stride_mult = 64;
+ break;
+ case I915_TILING_X:
+ stride_mult = 512;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto error;
+ }
+ fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
+
+ plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
+ plane_config->size);
+
+ crtc->base.primary->fb = fb;
+ return;
+
+error:
+ kfree(fb);
+}
+
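On Skylake the PLANE_STRIDE register counts in units that depend on tiling: the new readout above multiplies the raw field by 64 for linear and 512 for X-tiled surfaces to recover a byte pitch. A small check of that conversion, with the unit sizes taken from the hunk above:

#include <stdint.h>
#include <stdio.h>

enum tiling { TILING_NONE, TILING_X };

/* Convert the raw 10-bit stride field to bytes, as the readout does. */
static unsigned int stride_to_bytes(uint32_t reg, enum tiling t)
{
	unsigned int mult = (t == TILING_X) ? 512 : 64;

	return (reg & 0x3ff) * mult;
}

int main(void)
{
	/* A 1920x4-byte linear scanline is 7680 bytes = 120 units of 64. */
	printf("linear: %u bytes\n", stride_to_bytes(120, TILING_NONE));
	/* The same pitch X-tiled would be 15 units of 512. */
	printf("x-tiled: %u bytes\n", stride_to_bytes(15, TILING_X));
	return 0;
}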
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7662,68 +7702,71 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static void ironlake_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+ironlake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
- int pipe = crtc->pipe, plane = crtc->plane;
+ int pipe = crtc->pipe;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
- val = I915_READ(DSPCNTR(plane));
+ fb = &intel_fb->base;
+
+ val = I915_READ(DSPCNTR(pipe));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- offset = I915_READ(DSPOFFSET(plane));
+ offset = I915_READ(DSPOFFSET(pipe));
} else {
- if (plane_config->tiled)
- offset = I915_READ(DSPTILEOFF(plane));
+ if (plane_config->tiling)
+ offset = I915_READ(DSPTILEOFF(pipe));
else
- offset = I915_READ(DSPLINOFF(plane));
+ offset = I915_READ(DSPLINOFF(pipe));
}
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+
+ crtc->base.primary->fb = fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7810,24 +7853,24 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
- WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
- WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
- WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
- WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
@@ -7835,7 +7878,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
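The WARN -> I915_STATE_WARN conversion in this hunk funnels all state-checker
mismatches through one driver-private macro instead of the generic kernel
WARN(). A sketch of the shape such a wrapper can take; the option name and
plumbing below are assumptions for illustration, not the actual i915
definition:

/*
 * Illustrative only: WARN-compatible, but state-checker splats can be
 * demoted to plain error messages behind a (hypothetical) module option.
 */
#define STATE_WARN(condition, format...) ({			\
	int __ret = !!(condition);				\
	if (unlikely(__ret)) {					\
		if (verbose_state_checks)	/* assumed knob */	\
			WARN(1, format);			\
		else						\
			DRM_ERROR(format);			\
	}							\
	unlikely(__ret);					\
})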
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@ -7933,19 +7976,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/*
* Make sure we're not in PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
- *
- * The other problem is that hsw_restore_lcpll() is called as part of
- * the runtime PM resume sequence, so we can't just call
- * gen6_gt_force_wake_get() because that function calls
- * intel_runtime_pm_get(), and we can't change the runtime PM refcount
- * while we are on the resume sequence. So to solve this problem we have
- * to call special forcewake code that doesn't touch runtime PM and
- * doesn't enable the forcewake delayed work.
*/
- spin_lock_irq(&dev_priv->uncore.lock);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7975,11 +8007,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- /* See the big comment above. */
- spin_lock_irq(&dev_priv->uncore.lock);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
/*
@@ -8041,9 +8069,10 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
- if (!intel_ddi_pll_select(crtc))
+ if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
crtc->lowfreq_avail = false;
@@ -8053,14 +8082,23 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- u32 temp;
+ u32 temp, dpll_ctl1;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL0:
+ /*
+ * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
+ * of the shared DPLL framework and thus needs to be read out
+ * separately
+ */
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+ pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+ break;
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
@@ -8075,7 +8113,7 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -8090,7 +8128,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8132,7 +8170,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8286,7 +8324,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(intel_crtc->cursor_width);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8295,7 +8333,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
@@ -8326,10 +8364,10 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (on)
base = intel_crtc->cursor_addr;
- if (x >= intel_crtc->config.pipe_src_w)
+ if (x >= intel_crtc->config->pipe_src_w)
base = 0;
- if (y >= intel_crtc->config.pipe_src_h)
+ if (y >= intel_crtc->config->pipe_src_h)
base = 0;
if (x < 0) {
@@ -8357,7 +8395,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
- to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->cursor_height *
intel_crtc->cursor_width - 1) * 4;
}
@@ -8405,109 +8443,6 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- unsigned old_width;
- uint32_t addr;
- int ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!obj) {
- DRM_DEBUG_KMS("cursor off\n");
- addr = 0;
- mutex_lock(&dev->struct_mutex);
- goto finish;
- }
-
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical) {
- unsigned alignment;
-
- /*
- * Global gtt pte registers are special registers which actually
- * forward writes to a chunk of system memory. Which means that
- * there is no risk that the register values disappear as soon
- * as we call intel_runtime_pm_put(), so it is correct to wrap
- * only the pin/unpin/fence and not more.
- */
- intel_runtime_pm_get(dev_priv);
-
- /* Note that the w/a also requires 2 PTE of padding following
- * the bo. We currently fill all unused PTE with the shadow
- * page and so we should always have valid PTE following the
- * cursor preventing the VT-d warning.
- */
- alignment = 0;
- if (need_vtd_wa(dev))
- alignment = 64*1024;
-
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
- if (ret) {
- DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
- intel_runtime_pm_put(dev_priv);
- goto fail_locked;
- }
-
- ret = i915_gem_object_put_fence(obj);
- if (ret) {
- DRM_DEBUG_KMS("failed to release fence for cursor");
- intel_runtime_pm_put(dev_priv);
- goto fail_unpin;
- }
-
- addr = i915_gem_obj_ggtt_offset(obj);
-
- intel_runtime_pm_put(dev_priv);
- } else {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- goto fail_locked;
- }
- addr = obj->phys_handle->busaddr;
- }
-
- finish:
- if (intel_crtc->cursor_bo) {
- if (!INTEL_INFO(dev)->cursor_needs_physical)
- i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- }
-
- i915_gem_track_fb(intel_crtc->cursor_bo, obj,
- INTEL_FRONTBUFFER_CURSOR(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- old_width = intel_crtc->cursor_width;
-
- intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = obj;
- intel_crtc->cursor_width = width;
- intel_crtc->cursor_height = height;
-
- if (intel_crtc->active) {
- if (old_width != width)
- intel_update_watermarks(crtc);
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
- }
-
- return 0;
-fail_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
-fail_locked:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8730,7 +8665,7 @@ retry:
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -8771,7 +8706,7 @@ retry:
fail:
intel_crtc->new_enabled = crtc->enabled;
if (intel_crtc->new_enabled)
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
@@ -8817,7 +8752,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll = pipe_config->dpll_hw_state.dpll;
@@ -8834,7 +8769,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8941,7 +8876,7 @@ int intel_dotclock_calculate(int link_freq,
}
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
@@ -8954,7 +8889,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* agree once we know their relationship in the encoder's
* get_config() function.
*/
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
@@ -8965,9 +8900,9 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -9082,6 +9017,14 @@ out:
intel_runtime_pm_put(dev_priv);
}
+static void intel_crtc_set_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ kfree(crtc->config);
+ crtc->config = crtc_state;
+ crtc->base.state = &crtc_state->base;
+}
+
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -9098,6 +9041,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
+ intel_crtc_set_state(intel_crtc, NULL);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -9115,7 +9059,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
+
+ if (work->flip_queued_req)
+ i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
@@ -9511,25 +9458,53 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
else if (i915.enable_execlists)
return true;
else
- return ring != obj->ring;
+ return ring != i915_gem_request_get_ring(obj->last_read_req);
}
-static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ const enum pipe pipe = intel_crtc->pipe;
+ u32 ctl, stride;
+
+ ctl = I915_READ(PLANE_CTL(pipe, 0));
+ ctl &= ~PLANE_CTL_TILED_MASK;
+ if (obj->tiling_mode == I915_TILING_X)
+ ctl |= PLANE_CTL_TILED_X;
+
+ /*
+ * The stride is expressed either in 64 byte chunks for linear buffers
+ * or in number of tiles for tiled buffers.
+ */
+ stride = fb->pitches[0] >> 6;
+ if (obj->tiling_mode == I915_TILING_X)
+ stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+
+ /*
+ * Neither PLANE_CTL nor PLANE_STRIDE is latched on vblank; both take
+ * effect on the PLANE_SURF write, so the update is guaranteed to be atomic.
+ */
+ I915_WRITE(PLANE_CTL(pipe, 0), ctl);
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+ I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
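The ordering in skl_do_mmio_flip() is the whole point: since PLANE_CTL and
PLANE_STRIDE only take effect on the PLANE_SURF write, every change is staged
first and armed by the surface write last. The same pattern in isolation
(stub MMIO helpers, not the i915 API):

#include <stdint.h>

enum { REG_CTL, REG_STRIDE, REG_SURF, REG_COUNT };
static uint32_t mmio[REG_COUNT];
static void wr(int r, uint32_t v) { mmio[r] = v; }
static uint32_t rd(int r) { return mmio[r]; }

/* ctl and stride are staged; hardware latches them, together with the new
 * surface address, only when REG_SURF is written. */
static void flip_atomically(uint32_t ctl, uint32_t stride, uint32_t surf)
{
	wr(REG_CTL, ctl);	/* staged, not yet live */
	wr(REG_STRIDE, stride);	/* staged, not yet live */
	wr(REG_SURF, surf);	/* arms all three at once */
	(void)rd(REG_SURF);	/* mirrors the posting read above */
}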
+
+static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- bool atomic_update;
- u32 start_vbl_count;
u32 dspcntr;
u32 reg;
- intel_mark_page_flip_active(intel_crtc);
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
@@ -9544,26 +9519,50 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+/*
+ * XXX: This is the temporary way to update the plane registers until we get
+ * around to using the usual plane update functions for MMIO flips
+ */
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ bool atomic_update;
+ u32 start_vbl_count;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ skl_do_mmio_flip(intel_crtc);
+ else
+ /* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(intel_crtc);
+
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
{
- struct intel_crtc *intel_crtc =
+ struct intel_crtc *crtc =
container_of(work, struct intel_crtc, mmio_flip.work);
- struct intel_engine_cs *ring;
- uint32_t seqno;
+ struct intel_mmio_flip *mmio_flip;
- seqno = intel_crtc->mmio_flip.seqno;
- ring = intel_crtc->mmio_flip.ring;
+ mmio_flip = &crtc->mmio_flip;
+ if (mmio_flip->req)
+ WARN_ON(__i915_wait_request(mmio_flip->req,
+ crtc->reset_counter,
+ false, NULL, NULL) != 0);
- if (seqno)
- WARN_ON(__i915_wait_seqno(ring, seqno,
- intel_crtc->reset_counter,
- false, NULL, NULL) != 0);
-
- intel_do_mmio_flip(intel_crtc);
+ intel_do_mmio_flip(crtc);
+ if (mmio_flip->req) {
+ mutex_lock(&crtc->base.dev->struct_mutex);
+ i915_gem_request_assign(&mmio_flip->req, NULL);
+ mutex_unlock(&crtc->base.dev->struct_mutex);
+ }
}
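The flip work now tracks a single refcounted request instead of a raw
ring/seqno pair, and i915_gem_request_assign(&req, NULL) doubles as the
release. The assign helper follows a common refcounting idiom; a generic
sketch of it (types and helpers below are stand-ins):

struct req { int refcount; };

static void req_get(struct req *r) { if (r) r->refcount++; }
static void req_put(struct req *r) { if (r) r->refcount--; }

/* Reference the new value before dropping the old one, then store; this
 * order keeps the self-assignment case (*slot == src) safe. */
static void req_assign(struct req **slot, struct req *src)
{
	req_get(src);
	req_put(*slot);
	*slot = src;
}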
static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -9575,8 +9574,8 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring = obj->ring;
+ i915_gem_request_assign(&intel_crtc->mmio_flip.req,
+ obj->last_write_req);
schedule_work(&intel_crtc->mmio_flip.work);
@@ -9671,9 +9670,8 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return false;
if (work->flip_ready_vblank == 0) {
- if (work->flip_queued_ring &&
- !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno))
+ if (work->flip_queued_req &&
+ !i915_gem_request_completed(work->flip_queued_req, true))
return false;
work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9726,6 +9724,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
@@ -9815,10 +9814,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
@@ -9838,16 +9837,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = obj->last_write_seqno;
- work->flip_queued_ring = obj->ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = intel_ring_get_seqno(ring);
- work->flip_queued_ring = ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ intel_ring_get_request(ring));
}
work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9856,7 +9855,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
@@ -9884,8 +9883,7 @@ free_work:
if (ret == -EIO) {
out_hang:
- intel_crtc_wait_for_pending_flips(crtc);
- ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ ret = intel_plane_restore(primary);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
@@ -9898,6 +9896,8 @@ out_hang:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
+ .atomic_begin = intel_begin_crtc_commit,
+ .atomic_flush = intel_finish_crtc_commit,
};
/**
@@ -9927,7 +9927,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
crtc->new_enabled = crtc->base.enabled;
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -9960,7 +9960,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
static void
connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
@@ -9987,7 +9987,7 @@ connected_sink_compute_bpp(struct intel_connector *connector,
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
@@ -10056,7 +10056,7 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
}
static void intel_dump_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
@@ -10090,10 +10090,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
@@ -10192,14 +10192,14 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
bool retry = true;
@@ -10217,8 +10217,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!pipe_config)
return ERR_PTR(-ENOMEM);
- drm_mode_copy(&pipe_config->adjusted_mode, mode);
- drm_mode_copy(&pipe_config->requested_mode, mode);
+ drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->base.mode, mode);
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -10229,13 +10229,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
@@ -10254,9 +10254,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
- pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
- pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+ drm_crtc_get_hv_timing(&pipe_config->base.mode,
+ &pipe_config->pipe_src_w,
+ &pipe_config->pipe_src_h);
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
@@ -10264,7 +10264,8 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
+ drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
@@ -10284,7 +10285,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -10441,7 +10442,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
- intel_crtc->new_config != &intel_crtc->config);
+ intel_crtc->new_config != intel_crtc->config);
WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
@@ -10493,8 +10494,8 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
static bool
intel_pipe_config_compare(struct drm_device *dev,
- struct intel_crtc_config *current_config,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *current_config,
+ struct intel_crtc_state *pipe_config)
{
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@@ -10585,19 +10586,19 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
}
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
@@ -10608,17 +10609,17 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(has_audio);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -10668,7 +10669,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
#undef PIPE_CONF_CHECK_X
@@ -10742,7 +10743,7 @@ check_connector_state(struct drm_device *dev)
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
- WARN(&connector->new_encoder->base != connector->base.encoder,
+ I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
}
@@ -10762,9 +10763,9 @@ check_encoder_state(struct drm_device *dev)
encoder->base.base.id,
encoder->base.name);
- WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
- WARN(encoder->connectors_active && !encoder->base.crtc,
+ I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10783,19 +10784,19 @@ check_encoder_state(struct drm_device *dev)
if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
continue;
- WARN(!!encoder->base.crtc != enabled,
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
- WARN(active && !encoder->base.crtc,
+ I915_STATE_WARN(active && !encoder->base.crtc,
"active encoder with no crtc\n");
- WARN(encoder->connectors_active != active,
+ I915_STATE_WARN(encoder->connectors_active != active,
"encoder's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, encoder->connectors_active);
active = encoder->get_hw_state(encoder, &pipe);
- WARN(active != encoder->connectors_active,
+ I915_STATE_WARN(active != encoder->connectors_active,
"encoder's hw state doesn't match sw tracking "
"(expected %i, found %i)\n",
encoder->connectors_active, active);
@@ -10804,7 +10805,7 @@ check_encoder_state(struct drm_device *dev)
continue;
tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- WARN(active && pipe != tracked_pipe,
+ I915_STATE_WARN(active && pipe != tracked_pipe,
"active encoder's pipe doesn't match"
"(expected %i, found %i)\n",
tracked_pipe, pipe);
@@ -10818,7 +10819,7 @@ check_crtc_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
for_each_intel_crtc(dev, crtc) {
bool enabled = false;
@@ -10829,7 +10830,7 @@ check_crtc_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10840,10 +10841,10 @@ check_crtc_state(struct drm_device *dev)
active = true;
}
- WARN(active != crtc->active,
+ I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.enabled,
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
@@ -10863,16 +10864,16 @@ check_crtc_state(struct drm_device *dev)
encoder->get_config(encoder, &pipe_config);
}
- WARN(crtc->active != active,
+ I915_STATE_WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
if (active &&
- !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
- WARN(1, "pipe state doesn't match!\n");
+ !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
- intel_dump_pipe_config(crtc, &crtc->config,
+ intel_dump_pipe_config(crtc, crtc->config,
"[sw state]");
}
}
@@ -10897,14 +10898,14 @@ check_shared_dpll_state(struct drm_device *dev)
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > hweight32(pll->config.crtc_mask),
+ I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
pll->active, hweight32(pll->config.crtc_mask));
- WARN(pll->active && !pll->on,
+ I915_STATE_WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
- WARN(pll->on && !pll->active,
+ I915_STATE_WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
- WARN(pll->on != active,
+ I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
@@ -10914,14 +10915,14 @@ check_shared_dpll_state(struct drm_device *dev)
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
- WARN(pll->active != active_crtcs,
+ I915_STATE_WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
+ I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
+ I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
@@ -10937,16 +10938,16 @@ intel_modeset_check_state(struct drm_device *dev)
check_shared_dpll_state(dev);
}
-void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock)
{
/*
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- pipe_config->adjusted_mode.crtc_clock, dotclock);
+ pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
static void update_scanline_offset(struct intel_crtc *crtc)
@@ -10972,7 +10973,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* one to the value.
*/
if (IS_GEN2(dev)) {
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
int vtotal;
vtotal = mode->crtc_vtotal;
@@ -10987,7 +10988,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
@@ -10995,7 +10996,7 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
- struct intel_crtc_config *pipe_config = NULL;
+ struct intel_crtc_state *pipe_config = NULL;
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
@@ -11020,10 +11021,40 @@ out:
return pipe_config;
}
+static int __intel_set_mode_setup_plls(struct drm_device *dev,
+ unsigned modeset_pipes,
+ unsigned disable_pipes)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ if (!dev_priv->display.crtc_compute_clock)
+ return 0;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ struct intel_crtc_state *state = intel_crtc->new_config;
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+ state);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
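The extracted helper is a small transaction: reserve shared-DPLL state for
every pipe being touched, compute clocks per CRTC, and unwind the reservation
on the first failure so nothing is left half-configured. The same shape in
isolation (all names below are stand-ins):

#include <errno.h>

struct pll_config { int reserved; };

static int start_config(struct pll_config *c) { c->reserved = 1; return 0; }
static void abort_config(struct pll_config *c) { c->reserved = 0; }
static int compute_clock(int crtc) { return crtc < 0 ? -EINVAL : 0; }

static int setup_plls(struct pll_config *cfg, const int *crtcs, int n)
{
	int i, ret = start_config(cfg);

	if (ret)
		return ret;

	for (i = 0; i < n; i++) {
		ret = compute_clock(crtcs[i]);
		if (ret) {
			abort_config(cfg); /* one failure unwinds the lot */
			return ret;
		}
	}
	return 0;
}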
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -11057,21 +11088,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
- if (dev_priv->display.crtc_compute_clock) {
- unsigned clear_pipes = modeset_pipes | disable_pipes;
-
- ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
- if (ret)
- goto done;
-
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc);
- if (ret) {
- intel_shared_dpll_abort_config(dev_priv);
- goto done;
- }
- }
- }
+ ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
+ if (ret)
+ goto done;
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -11092,8 +11111,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->mode = *mode;
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
- to_intel_crtc(crtc)->config = *pipe_config;
- to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
+ intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
/*
* Calculate and store various constants which
@@ -11101,7 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc,
- &pipe_config->adjusted_mode);
+ &pipe_config->base.adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
@@ -11114,26 +11132,15 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_plane *primary = intel_crtc->base.primary;
+ int vdisplay, hdisplay;
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- mutex_unlock(&dev->struct_mutex);
- goto done;
- }
- if (old_fb)
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- mutex_unlock(&dev->struct_mutex);
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, &intel_crtc->base,
+ fb, 0, 0,
+ hdisplay, vdisplay,
+ x << 16, y << 16,
+ hdisplay << 16, vdisplay << 16);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11148,7 +11155,6 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
- kfree(pipe_config);
kfree(saved_mode);
return ret;
}
@@ -11156,7 +11162,7 @@ done:
static int intel_set_mode_pipes(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -11176,7 +11182,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
pipe_config = intel_modeset_compute_config(crtc, mode, fb,
@@ -11271,7 +11277,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
crtc->new_enabled = config->save_crtc_enabled[count++];
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11483,7 +11489,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11520,7 +11526,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
@@ -11577,7 +11583,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
goto fail;
} else if (pipe_config) {
if (pipe_config->has_audio !=
- to_intel_crtc(set->crtc)->config.has_audio)
+ to_intel_crtc(set->crtc)->config->has_audio)
config->mode_changed = true;
/*
@@ -11601,11 +11607,14 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+ struct drm_plane *primary = set->crtc->primary;
+ int vdisplay, hdisplay;
- intel_crtc_wait_for_pending_flips(set->crtc);
-
- ret = intel_pipe_set_base(set->crtc,
- set->x, set->y, set->fb);
+ drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
+ 0, 0, hdisplay, vdisplay,
+ set->x << 16, set->y << 16,
+ hdisplay << 16, vdisplay << 16);
/*
* We need to make sure the primary plane is re-enabled if it
@@ -11660,6 +11669,8 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
+ .atomic_duplicate_state = intel_crtc_duplicate_state,
+ .atomic_destroy_state = intel_crtc_destroy_state,
};
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -11762,93 +11773,149 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
-static int
-intel_primary_plane_disable(struct drm_plane *plane)
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @plane: drm plane to prepare for
+ * @fb: framebuffer to prepare for presentation
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ unsigned frontbuffer_bits = 0;
+ int ret = 0;
- if (!plane->fb)
+ if (!obj)
return 0;
- BUG_ON(!plane->crtc);
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
+ break;
+ }
- intel_crtc = to_intel_crtc(plane->crtc);
+ mutex_lock(&dev->struct_mutex);
- /*
- * Even though we checked plane->fb above, it's still possible that
- * the primary plane has been implicitly disabled because the crtc
- * coordinates given weren't visible, or because we detected
- * that it was 100% covered by a sprite plane. Or, the CRTC may be
- * off and we've set a fb, but haven't actually turned on the CRTC yet.
- * In either case, we need to unpin the FB and let the fb pointer get
- * updated, but otherwise we don't need to touch the hardware.
- */
- if (!intel_crtc->primary_enabled)
- goto disable_unpin;
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev)->cursor_needs_physical) {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_object_attach_phys(obj, align);
+ if (ret)
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ } else {
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ }
- intel_crtc_wait_for_pending_flips(plane->crtc);
- intel_disable_primary_hw_plane(plane, plane->crtc);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
-disable_unpin:
- mutex_lock(&dev->struct_mutex);
- i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- intel_unpin_fb_obj(intel_fb_obj(plane->fb));
mutex_unlock(&dev->struct_mutex);
- plane->fb = NULL;
- return 0;
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @fb: old framebuffer that was on plane
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ if (WARN_ON(!obj))
+ return;
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR ||
+ !INTEL_INFO(dev)->cursor_needs_physical) {
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
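prepare/cleanup are deliberately asymmetric in time: the new buffer is pinned
before any register is touched, and the old one is unpinned only after the
hardware has switched away from it, so scanout never reads an unpinned
surface. Schematically (every name below is a stand-in, not the DRM helper
API):

struct fb;

static int prepare_fb(struct fb *new_fb) { /* pin + track */ return 0; }
static void commit_fb(struct fb *new_fb) { /* point hw at new_fb */ }
static void cleanup_fb(struct fb *old_fb) { /* unpin after the switch */ }

static int flip_to(struct fb **current_fb, struct fb *new_fb)
{
	struct fb *old_fb = *current_fb;
	int ret = prepare_fb(new_fb);

	if (ret)
		return ret; /* failure leaves the old fb fully intact */

	commit_fb(new_fb);
	*current_fb = new_fb;

	if (old_fb)
		cleanup_fb(old_fb);
	return 0;
}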
static int
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
+ int ret;
- return drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &state->visible);
-}
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
-static int
-intel_prepare_primary_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- int ret;
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+ if (ret)
+ return ret;
- intel_crtc_wait_for_pending_flips(crtc);
+ if (intel_crtc->active) {
+ intel_crtc->atomic.wait_for_flips = true;
- if (intel_crtc_has_pending_flip(crtc)) {
- DRM_ERROR("pipe is still busy with an old pageflip\n");
- return -EBUSY;
- }
+ /*
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
+ */
+ if (intel_crtc->primary_enabled &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.plane == intel_crtc->plane &&
+ state->base.rotation != BIT(DRM_ROTATE_0)) {
+ intel_crtc->atomic.disable_fbc = true;
+ }
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_DEBUG_KMS("pin & fence failed\n");
- return ret;
+ if (state->visible) {
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
+ intel_crtc->atomic.wait_vblank = true;
}
+
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+
+ intel_crtc->atomic.update_fbc = true;
}
return 0;
@@ -11858,53 +11925,26 @@ static void
intel_commit_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = plane->fb;
+ struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
- crtc->primary->fb = fb;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ plane->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (intel_crtc->active) {
- /*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
- */
- if (intel_crtc->primary_enabled &&
- INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
- }
-
if (state->visible) {
- bool was_enabled = intel_crtc->primary_enabled;
-
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
@@ -11912,14 +11952,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev) && !was_enabled)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
} else {
/*
* If clipping results in a non-visible primary plane,
@@ -11930,90 +11962,129 @@ intel_commit_primary_plane(struct drm_plane *plane,
*/
intel_disable_primary_hw_plane(plane, crtc);
}
+ }
+}
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane;
+ struct drm_plane *p;
+ unsigned fb_bits = 0;
+
+ /* Track fb's for any planes being disabled */
+ list_for_each_entry(p, &dev->mode_config.plane_list, head) {
+ intel_plane = to_intel_plane(p);
+
+ if (intel_crtc->atomic.disabled_planes &
+ (1 << drm_plane_index(p))) {
+ switch (p->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
+ break;
+ }
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
- if (old_fb && old_fb != fb) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ if (intel_crtc->atomic.wait_for_flips)
+ intel_crtc_wait_for_pending_flips(crtc);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (intel_crtc->atomic.disable_fbc)
+ intel_fbc_disable(dev);
+
+ if (intel_crtc->atomic.pre_disable_primary)
+ intel_pre_disable_primary(crtc);
+
+ if (intel_crtc->atomic.update_wm)
+ intel_update_watermarks(crtc);
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Perform vblank evasion around commit operation */
+ if (intel_crtc->active)
+ intel_crtc->atomic.evade =
+ intel_pipe_update_start(intel_crtc,
+ &intel_crtc->atomic.start_vbl_count);
}
-static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc)
{
- struct intel_plane_state state;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int ret;
+ struct drm_plane *p;
- state.crtc = crtc;
- state.fb = fb;
+ if (intel_crtc->atomic.evade)
+ intel_pipe_update_end(intel_crtc,
+ intel_crtc->atomic.start_vbl_count);
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
+ intel_runtime_pm_put(dev_priv);
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ if (intel_crtc->atomic.wait_vblank)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
- state.orig_src = state.src;
- state.orig_dst = state.dst;
+ intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
- ret = intel_check_primary_plane(plane, &state);
- if (ret)
- return ret;
+ if (intel_crtc->atomic.update_fbc) {
+ mutex_lock(&dev->struct_mutex);
+ intel_fbc_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
- ret = intel_prepare_primary_plane(plane, &state);
- if (ret)
- return ret;
+ if (intel_crtc->atomic.post_enable_primary)
+ intel_post_enable_primary(crtc);
- intel_commit_primary_plane(plane, &state);
+ drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
+ if (intel_crtc->atomic.update_sprite_watermarks & (1 << drm_plane_index(p)))
+ intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
+ false, false);
- return 0;
+ memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
}
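As a rough sketch of the flow these two hooks create (illustrative only;
example_commit_planes_on_crtc is not part of this patch, and in practice the
transitional plane helpers drive the begin/commit/finish sequence):

static void example_commit_planes_on_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_plane *p;

	/* Sleepable pre-commit work, then start vblank evasion. */
	intel_begin_crtc_commit(crtc);

	/* Plane register writes land inside the evaded window. */
	list_for_each_entry(p, &dev->mode_config.plane_list, head)
		if (p->crtc == crtc)
			to_intel_plane(p)->commit_plane(p,
					to_intel_plane_state(p->state));

	/* End evasion, then run the deferred sleepable cleanup and
	 * clear intel_crtc->atomic for the next commit. */
	intel_finish_crtc_commit(crtc);
}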
-/* Common destruction function for both primary and cursor planes */
-static void intel_plane_destroy(struct drm_plane *plane)
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
-static const struct drm_plane_funcs intel_primary_plane_funcs = {
- .update_plane = intel_primary_plane_setplane,
- .disable_plane = intel_primary_plane_disable,
+const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = drm_plane_helper_update,
+ .disable_plane = drm_plane_helper_disable,
.destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
};
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *primary;
+ struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
int num_formats;
@@ -12021,11 +12092,19 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (primary == NULL)
return NULL;
+ state = intel_create_plane_state(&primary->base);
+ if (!state) {
+ kfree(primary);
+ return NULL;
+ }
+ primary->base.state = &state->base;
+
primary->can_scale = false;
primary->max_downscale = 1;
primary->pipe = pipe;
primary->plane = pipe;
- primary->rotation = BIT(DRM_ROTATE_0);
+ primary->check_plane = intel_check_primary_plane;
+ primary->commit_plane = intel_commit_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -12038,7 +12117,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
}
drm_universal_plane_init(dev, &primary->base, 0,
- &intel_primary_plane_funcs,
+ &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
@@ -12051,38 +12130,32 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&primary->base.base,
dev->mode_config.rotation_property,
- primary->rotation);
+ state->base.rotation);
}
- return &primary->base;
-}
-
-static int
-intel_cursor_plane_disable(struct drm_plane *plane)
-{
- if (!plane->fb)
- return 0;
-
- BUG_ON(!plane->crtc);
+ drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+ return &primary->base;
}
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_device *dev = plane->dev;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- int crtc_w, crtc_h;
+ struct intel_crtc *intel_crtc;
unsigned stride;
int ret;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
ret = drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
@@ -12094,18 +12167,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
/* if we want to turn off the cursor ignore width and height */
if (!obj)
- return 0;
+ goto finish;
/* Check for which cursor types we support */
- crtc_w = drm_rect_width(&state->orig_dst);
- crtc_h = drm_rect_height(&state->orig_dst);
- if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
- DRM_DEBUG("Cursor dimension not supported\n");
+ if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+ DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+ state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
}
- stride = roundup_pow_of_two(crtc_w) * 4;
- if (obj->base.size < stride * crtc_h) {
+ stride = roundup_pow_of_two(state->base.crtc_w) * 4;
+ if (obj->base.size < stride * state->base.crtc_h) {
DRM_DEBUG_KMS("buffer is too small\n");
return -ENOMEM;
}
@@ -12121,113 +12193,84 @@ intel_check_cursor_plane(struct drm_plane *plane,
}
mutex_unlock(&dev->struct_mutex);
+finish:
+ if (intel_crtc->active) {
+ if (intel_crtc->cursor_width != state->base.crtc_w)
+ intel_crtc->atomic.update_wm = true;
+
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+ }
+
return ret;
}
-static int
+static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_device *dev = plane->dev;
+ struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- int crtc_w, crtc_h;
-
- crtc->cursor_x = state->orig_dst.x1;
- crtc->cursor_y = state->orig_dst.y1;
-
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
- intel_plane->obj = obj;
-
- if (fb != crtc->cursor->fb) {
- crtc_w = drm_rect_width(&state->orig_dst);
- crtc_h = drm_rect_height(&state->orig_dst);
- return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
- } else {
- intel_crtc_update_cursor(crtc, state->visible);
-
- intel_frontbuffer_flip(crtc->dev,
- INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
-
- return 0;
- }
-}
-
-static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane_state state;
- int ret;
+ struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
+ uint32_t addr;
- state.crtc = crtc;
- state.fb = fb;
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
+ plane->fb = state->base.fb;
+ crtc->cursor_x = state->base.crtc_x;
+ crtc->cursor_y = state->base.crtc_y;
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
+ intel_plane->obj = obj;
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ if (intel_crtc->cursor_bo == obj)
+ goto update;
- state.orig_src = state.src;
- state.orig_dst = state.dst;
+ if (!obj)
+ addr = 0;
+ else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ addr = i915_gem_obj_ggtt_offset(obj);
+ else
+ addr = obj->phys_handle->busaddr;
- ret = intel_check_cursor_plane(plane, &state);
- if (ret)
- return ret;
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+update:
+ intel_crtc->cursor_width = state->base.crtc_w;
+ intel_crtc->cursor_height = state->base.crtc_h;
- return intel_commit_cursor_plane(plane, &state);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, state->visible);
}
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_cursor_plane_update,
- .disable_plane = intel_cursor_plane_disable,
- .destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property,
-};
-
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *cursor;
+ struct intel_plane_state *state;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (cursor == NULL)
return NULL;
+ state = intel_create_plane_state(&cursor->base);
+ if (!state) {
+ kfree(cursor);
+ return NULL;
+ }
+ cursor->base.state = &state->base;
+
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
- cursor->rotation = BIT(DRM_ROTATE_0);
+ cursor->check_plane = intel_check_cursor_plane;
+ cursor->commit_plane = intel_commit_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_cursor_plane_funcs,
+ &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
@@ -12241,9 +12284,11 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
- cursor->rotation);
+ state->base.rotation);
}
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
return &cursor->base;
}
@@ -12251,6 +12296,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
@@ -12259,6 +12305,11 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (intel_crtc == NULL)
return;
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ goto fail;
+ intel_crtc_set_state(intel_crtc, crtc_state);
+
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
@@ -12311,6 +12362,7 @@ fail:
drm_plane_cleanup(primary);
if (cursor)
drm_plane_cleanup(cursor);
+ kfree(crtc_state);
kfree(intel_crtc);
}
@@ -12383,28 +12435,6 @@ static bool has_edp_a(struct drm_device *dev)
return true;
}
-const char *intel_output_name(int output)
-{
- static const char *names[] = {
- [INTEL_OUTPUT_UNUSED] = "Unused",
- [INTEL_OUTPUT_ANALOG] = "Analog",
- [INTEL_OUTPUT_DVO] = "DVO",
- [INTEL_OUTPUT_SDVO] = "SDVO",
- [INTEL_OUTPUT_LVDS] = "LVDS",
- [INTEL_OUTPUT_TVOUT] = "TV",
- [INTEL_OUTPUT_HDMI] = "HDMI",
- [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
- [INTEL_OUTPUT_EDP] = "eDP",
- [INTEL_OUTPUT_DSI] = "DSI",
- [INTEL_OUTPUT_UNKNOWN] = "Unknown",
- };
-
- if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
- return "Invalid";
-
- return names[output];
-}
-
static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12428,6 +12458,7 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12491,14 +12522,16 @@ static void intel_setup_outputs(struct drm_device *dev)
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
+ !intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
+ !intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
@@ -12556,6 +12589,37 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ /*
+ * FIXME: We don't have full atomic support yet, but we want to be
+ * able to enable/test plane updates via the atomic interface in the
+ * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core
+ * will take some atomic codepaths to lookup properties during
+ * drmModeGetConnector() that unconditionally dereference
+ * connector->state.
+ *
+ * We create a dummy connector state here for each connector to ensure
+ * the DRM core doesn't try to dereference a NULL connector->state.
+ * The actual connector properties will never be updated or contain
+ * useful information, but since we're doing this specifically for
+ * testing/debug of the plane operations (and only when a specific
+ * kernel module option is given), that shouldn't really matter.
+ *
+ * Once atomic support for CRTCs + connectors lands, this loop should
+ * be removed since we'll be setting up real connector state, which
+ * will contain Intel-specific properties.
+ */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (!WARN_ON(connector->state)) {
+ connector->state =
+ kzalloc(sizeof(*connector->state),
+ GFP_KERNEL);
+ }
+ }
+ }
+
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12696,8 +12760,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- aligned_height = intel_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ aligned_height = intel_fb_align_height(dev, mode_cmd->height,
+ obj->tiling_mode);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12739,6 +12803,8 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
+ .atomic_check = intel_atomic_check,
+ .atomic_commit = intel_atomic_commit,
};
/* Set up chip specific display functions */
@@ -12757,23 +12823,32 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
- if (HAS_DDI(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ skylake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- if (INTEL_INFO(dev)->gen >= 9)
- dev_priv->display.update_primary_plane =
- skylake_update_primary_plane;
- else
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ } else if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
@@ -12783,7 +12858,8 @@ static void intel_init_display(struct drm_device *dev)
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -12792,7 +12868,8 @@ static void intel_init_display(struct drm_device *dev)
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -13147,7 +13224,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
/* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
@@ -13164,8 +13241,8 @@ void intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- if (dev_priv->display.get_plane_config) {
- dev_priv->display.get_plane_config(crtc,
+ if (dev_priv->display.get_initial_plane_config) {
+ dev_priv->display.get_initial_plane_config(crtc,
&crtc->plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -13229,7 +13306,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->config.cpu_transcoder);
+ reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
@@ -13433,12 +13510,12 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
- memset(&crtc->config, 0, sizeof(crtc->config));
+ memset(crtc->config, 0, sizeof(*crtc->config));
- crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+ crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
crtc->active = dev_priv->display.get_pipe_config(crtc,
- &crtc->config);
+ crtc->config);
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13475,7 +13552,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
}
@@ -13525,7 +13602,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
- intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
+ intel_mode_from_pipe_config(&crtc->base.mode,
+ crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -13540,7 +13618,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
- intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
+ intel_dump_pipe_config(crtc, crtc->config,
+ "[setup_hw_state]");
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -13664,7 +13743,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5cecc20efa71..a74aaf9242b9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -1074,7 +1075,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
{
u32 ctrl1;
@@ -1101,7 +1102,7 @@ skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
}
static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
@@ -1118,7 +1119,7 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
static void
intel_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config, int link_bw)
+ struct intel_crtc_state *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
@@ -1151,11 +1152,11 @@ intel_dp_set_clock(struct intel_encoder *encoder,
bool
intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = encoder->new_crtc;
@@ -1269,7 +1270,7 @@ found:
&pipe_config->dp_m_n);
if (intel_connector->panel.downclock_mode != NULL &&
- intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
@@ -1295,11 +1296,12 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
+ crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
- if (crtc->config.port_clock == 162000) {
+ if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around a ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
@@ -1324,7 +1326,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
/*
* There are four kinds of DP registers:
@@ -1352,7 +1354,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
/* Split out the IBX/CPU vs CPT settings */
@@ -1558,7 +1560,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP port %c VDD already requested on\n",
+ I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
@@ -1642,7 +1644,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -2013,7 +2015,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
@@ -2050,7 +2052,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
@@ -2073,7 +2075,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -2102,9 +2104,12 @@ static void intel_disable_dp(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
+ if (HAS_PSR(dev) && !HAS_DDI(dev))
+ intel_psr_disable(intel_dp);
+
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
@@ -2309,7 +2314,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- if (crtc->config.has_audio) {
+ if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -2329,6 +2334,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
+ intel_psr_enable(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -3515,8 +3521,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev)))
@@ -3541,8 +3545,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A.
@@ -3553,18 +3555,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
*/
DP &= ~DP_PIPEB_SELECT;
I915_WRITE(intel_dp->output_reg, DP);
-
- /* Changes to enable or select take place the vblank
- * after being written.
- */
- if (WARN_ON(crtc == NULL)) {
- /* We should never try to disable a port without a crtc
- * attached. For paranoia keep the code around for a
- * bit. */
- POSTING_READ(intel_dp->output_reg);
- msleep(50);
- } else
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ POSTING_READ(intel_dp->output_reg);
}
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -3769,7 +3760,7 @@ go_again:
intel_dp_stop_link_train(intel_dp);
}
- DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled) {
@@ -3785,7 +3776,7 @@ go_again:
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
- DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
goto go_again;
}
} else
@@ -4306,7 +4297,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
- drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
@@ -4322,6 +4312,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp->edp_notifier.notifier_call = NULL;
}
}
+ drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@@ -4396,7 +4387,9 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4416,7 +4409,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
return;
}
-bool
+enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -4424,7 +4417,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
- bool ret = true;
+ enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
@@ -4438,7 +4431,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
port_name(intel_dig_port->port));
- return false;
+ return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
@@ -4483,7 +4476,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
- ret = false;
+
+ ret = IRQ_HANDLED;
+
goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
@@ -4741,39 +4736,34 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
- struct intel_crtc_config *config = NULL;
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_dp *intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc_state *config = NULL;
struct intel_crtc *intel_crtc = NULL;
- struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
- enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+ enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
return;
}
- if (intel_connector == NULL) {
- DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
+ if (intel_dp == NULL) {
+ DRM_DEBUG_KMS("DRRS not supported.\n");
return;
}
/*
- * FIXME: This needs proper synchronization with psr state. But really
- * hard to tell without seeing the user of this function of this code.
- * Check locking and ordering once that lands.
+ * FIXME: This needs proper synchronization with PSR state for some
+ * platforms that cannot have PSR and DRRS enabled at the same time.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
- DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
- return;
- }
- encoder = intel_attached_encoder(&intel_connector->base);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ dig_port = dp_to_dig_port(intel_dp);
+ encoder = &dig_port->base;
intel_crtc = encoder->new_crtc;
if (!intel_crtc) {
@@ -4781,17 +4771,18 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- config = &intel_crtc->config;
+ config = intel_crtc->config;
- if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+ if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
- if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+ if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+ refresh_rate)
index = DRRS_LOW_RR;
- if (index == intel_dp->drrs_state.refresh_rate_type) {
+ if (index == dev_priv->drrs.refresh_rate_type) {
DRM_DEBUG_KMS(
"DRRS requested for previously set RR...ignoring\n");
return;
@@ -4803,7 +4794,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
}
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
- reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+ reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
@@ -4814,30 +4805,154 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
I915_WRITE(reg, val);
}
+ dev_priv->drrs.refresh_rate_type = index;
+
+ DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
+void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs) {
+ DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (WARN_ON(dev_priv->drrs.dp)) {
+ DRM_ERROR("DRRS already enabled\n");
+ goto unlock;
+ }
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+
+ dev_priv->drrs.dp = intel_dp;
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+
+ dev_priv->drrs.dp = NULL;
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+static void intel_edp_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
/*
- * mutex taken to ensure that there is no race between differnt
- * drrs calls trying to update refresh rate. This scenario may occur
- * in future when idleness detection based DRRS in kernel and
- * possible calls from user space to set differnt RR are made.
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck.
*/
- mutex_lock(&intel_dp->drrs_state.mutex);
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
- intel_dp->drrs_state.refresh_rate_type = index;
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ downclock_mode->vrefresh);
- mutex_unlock(&intel_dp->drrs_state.mutex);
+unlock:
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ /* Non-sync cancel: the work takes drrs.mutex and would deadlock. */
+ cancel_delayed_work(&dev_priv->drrs.work);
+ intel_dp_set_drrs_state(dev_priv->dev,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+ }
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+void intel_edp_drrs_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /* Non-sync cancel, as in intel_edp_drrs_invalidate(). */
+ cancel_delayed_work(&dev_priv->drrs.work);
+
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
+ !dev_priv->drrs.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->drrs.work,
+ msecs_to_jiffies(1000));
+ mutex_unlock(&dev_priv->drrs.mutex);
}
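Taken together, the invalidate/flush pair implements a simple idle
downclocking scheme: any frontbuffer dirtying forces the panel back to its
high refresh rate, and one second of idleness lets the delayed work drop to
the low rate. A hedged sketch of a caller (the function name is made up; in
i915 the frontbuffer tracking code fills this role):

static void example_frontbuffer_cpu_write(struct drm_device *dev,
					  unsigned frontbuffer_bits)
{
	/* About to dirty the frontbuffer: snap back to high RR and
	 * mark the bits busy so the downclock work stands down. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... CPU rendering into the frontbuffer ... */

	/* Writes done: clear the busy bits and re-arm the delayed
	 * work that downclocks after ~1s of idleness. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}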
static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+intel_dp_drrs_init(struct intel_connector *intel_connector,
+ struct drm_display_mode *fixed_mode)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
@@ -4859,13 +4974,13 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return NULL;
}
- dev_priv->drrs.connector = intel_connector;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
- mutex_init(&intel_dp->drrs_state.mutex);
+ mutex_init(&dev_priv->drrs.mutex);
- intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+ dev_priv->drrs.type = dev_priv->vbt.drrs_type;
- intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+ dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -4885,7 +5000,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+ dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
@@ -4934,7 +5049,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
- intel_dig_port,
intel_connector, fixed_mode);
break;
}
@@ -5086,7 +5200,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7f8c6a66680a..9f67a379a9a5 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -26,11 +26,12 @@
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -38,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
int bpp;
int lane_count, slots;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
int mst_pbn;
@@ -157,7 +158,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(port),
+ intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
@@ -170,7 +172,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->port, intel_crtc->config.pbn, &slots);
+ intel_mst->port,
+ intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
@@ -216,14 +219,14 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
pipe_config->has_dp_encoder = true;
@@ -254,7 +257,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
default:
break;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
@@ -311,7 +314,9 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb16d4e0..eef79ccd0b7c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,7 +143,7 @@ struct intel_encoder {
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
- struct intel_crtc_config *);
+ struct intel_crtc_state *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
@@ -159,7 +159,7 @@ struct intel_encoder {
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -244,23 +244,28 @@ typedef struct dpll {
} intel_clock_t;
struct intel_plane_state {
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_plane_state base;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
- struct drm_rect orig_src;
- struct drm_rect orig_dst;
bool visible;
+
+ /*
+ * used only for sprite planes to determine when to implicitly
+ * enable/disable the primary plane
+ */
+ bool hides_primary;
};
-struct intel_plane_config {
- bool tiled;
+struct intel_initial_plane_config {
+ unsigned int tiling;
int size;
u32 base;
};
-struct intel_crtc_config {
+struct intel_crtc_state {
+ struct drm_crtc_state base;
+
/**
* quirks - bitfield with hw state readout quirks
*
@@ -273,16 +278,6 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
- /* User requested mode, only valid as a starting point to
- * compute adjusted_mode, except in the case of (S)DVO where
- * it's also for the output timings of the (S)DVO chip.
- * adjusted_mode will then correspond to the S(DVO) chip's
- * preferred input timings. */
- struct drm_display_mode requested_mode;
- /* Actual pipe timings ie. what we program into the pipe timing
- * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
- struct drm_display_mode adjusted_mode;
-
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
@@ -406,8 +401,7 @@ struct intel_pipe_wm {
};
struct intel_mmio_flip {
- u32 seqno;
- struct intel_engine_cs *ring;
+ struct drm_i915_gem_request *req;
struct work_struct work;
};
@@ -417,6 +411,32 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+/*
+ * Tracking of operations that need to be performed at the beginning/end of an
+ * atomic commit, outside the atomic section where interrupts are disabled.
+ * These are generally operations that grab mutexes or might otherwise sleep
+ * and thus can't be run with interrupts disabled.
+ */
+struct intel_crtc_atomic_commit {
+ /* vblank evasion */
+ bool evade;
+ unsigned start_vbl_count;
+
+ /* Sleepable operations to perform before commit */
+ bool wait_for_flips;
+ bool disable_fbc;
+ bool pre_disable_primary;
+ bool update_wm;
+ unsigned disabled_planes;
+
+ /* Sleepable operations to perform after commit */
+ unsigned fb_bits;
+ bool wait_vblank;
+ bool update_fbc;
+ bool post_enable_primary;
+ unsigned update_sprite_watermarks;
+};
+
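To make the pattern concrete, a minimal sketch of a ->check_plane hook
populating these flags (modeled on the cursor check in this patch;
example_check_plane itself is hypothetical):

static int example_check_plane(struct drm_plane *plane,
			       struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc ?: plane->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (intel_crtc->active) {
		/* Consumed by intel_begin_crtc_commit(), i.e. before
		 * the IRQ-off update section... */
		intel_crtc->atomic.update_wm = true;
		/* ...and by intel_finish_crtc_commit() after it. */
		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
	}

	return 0;
}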
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -448,9 +468,9 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;
- struct intel_plane_config plane_config;
- struct intel_crtc_config config;
- struct intel_crtc_config *new_config;
+ struct intel_initial_plane_config plane_config;
+ struct intel_crtc_state *config;
+ struct intel_crtc_state *new_config;
bool new_enabled;
/* reset counter value when the last flip was submitted */
@@ -470,6 +490,8 @@ struct intel_crtc {
int scanline_offset;
struct intel_mmio_flip mmio_flip;
+
+ struct intel_crtc_atomic_commit atomic;
};
struct intel_plane_wm_parameters {
@@ -487,11 +509,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y;
- uint32_t src_w, src_h;
- unsigned int rotation;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -500,6 +517,12 @@ struct intel_plane {
*/
struct intel_plane_wm_parameters wm;
+ /*
+ * NOTE: Do not place new plane state fields here (e.g., when adding
+ * new plane properties). New runtime state should now be placed in
+ * the intel_plane_state structure and accessed via drm_plane->state.
+ */
+
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -510,6 +533,10 @@ struct intel_plane {
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
+ int (*check_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
+ void (*commit_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
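Per the NOTE above, runtime state now lives in intel_plane_state and is
reached through the base drm_plane_state pointer; a small illustration
(example_plane_visible is a made-up helper, using the to_intel_plane_state()
macro added just below):

static bool example_plane_visible(struct drm_plane *plane)
{
	/* plane->state points at the base member embedded in
	 * intel_plane_state, so container_of recovers the i915 state. */
	const struct intel_plane_state *state =
		to_intel_plane_state(plane->state);

	return state->visible;
}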
@@ -540,6 +567,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
@@ -564,17 +592,6 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
-/**
- * HIGH_RR is the highest eDP panel refresh rate read from EDID
- * LOW_RR is the lowest eDP panel refresh rate found from EDID
- * parsing for same resolution.
- */
-enum edp_drrs_refresh_rate_type {
- DRRS_HIGH_RR,
- DRRS_LOW_RR,
- DRRS_MAX_RR, /* RR count */
-};
-
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -630,12 +647,6 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
- struct {
- enum drrs_support_type type;
- enum edp_drrs_refresh_rate_type refresh_rate_type;
- struct mutex mutex;
- } drrs_state;
-
};
struct intel_digital_port {
@@ -644,7 +655,7 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
- bool (*hpd_pulse)(struct intel_digital_port *, bool);
+ enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
};
struct intel_dp_mst_encoder {
@@ -708,8 +719,7 @@ struct intel_unpin_work {
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
- struct intel_engine_cs *flip_queued_ring;
- u32 flip_queued_seqno;
+ struct drm_i915_gem_request *flip_queued_req;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
@@ -794,6 +804,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -825,17 +836,18 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-bool intel_ddi_pll_select(struct intel_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
@@ -865,6 +877,8 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
+int intel_fb_align_height(struct drm_device *dev, int height,
+ unsigned int tiling);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
@@ -872,9 +886,11 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
+void i915_audio_component_init(struct drm_i915_private *dev_priv);
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
-const char *intel_output_name(int output);
+extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
@@ -925,6 +941,18 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+int intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -933,7 +961,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *state);
void intel_put_shared_dpll(struct intel_crtc *crtc);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
@@ -963,11 +992,11 @@ void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
-ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -975,8 +1004,7 @@ void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config);
-int intel_format_to_fourcc(int format);
+ struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
@@ -992,16 +1020,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
@@ -1010,6 +1037,18 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int intel_disable_plane(struct drm_plane *plane);
+void intel_plane_destroy(struct drm_plane *plane);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1053,13 +1092,20 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
}
#endif
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_fbc_update(struct drm_device *dev);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_device *dev);
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/* intel_lvds.c */
@@ -1083,6 +1129,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
@@ -1093,10 +1140,10 @@ void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
@@ -1115,7 +1162,6 @@ void intel_backlight_unregister(struct drm_device *dev);
/* intel_psr.c */
-bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
@@ -1159,8 +1205,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev);
@@ -1191,7 +1235,6 @@ int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
-void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -1199,8 +1242,31 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
+void intel_post_enable_primary(struct drm_crtc *crtc);
+void intel_pre_disable_primary(struct drm_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
+/* intel_atomic.c */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+int intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/* intel_atomic_plane.c */
+struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 0b184079de14..10ab68457ca8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -24,24 +24,219 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-/* the sub-encoders aka panel drivers */
-static const struct intel_dsi_device intel_dsi_devices[] = {
+static const struct {
+ u16 panel_id;
+ struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
+} intel_dsi_drivers[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
- .name = "vbt-generic-dsi-vid-mode-display",
- .dev_ops = &vbt_generic_dsi_display_ops,
+ .init = vbt_panel_init,
},
};
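+/* Wait for all four generic FIFOs (LP/HS control and data) of a port to drain. */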
+static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("DPI FIFOs are not empty\n");
+}
+
+static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+ const u8 *data, u32 len)
+{
+ u32 i, j;
+
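+ /* pack the payload bytes, LSB first, into 32-bit generic data FIFO writes */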
+ for (i = 0; i < len; i += 4) {
+ u32 val = 0;
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(reg, val);
+ }
+}
+
+static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+ u8 *data, u32 len)
+{
+ u32 i, j;
+
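+ /* unpack 32-bit generic data FIFO reads back into bytes, LSB first */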
+ for (i = 0; i < len; i += 4) {
+ u32 val = I915_READ(reg);
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ *data++ = val >> 8 * j;
+ }
+}
+
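+/*
+ * Send a single DSI packet through the generic control/data FIFOs: the
+ * payload (if any) goes to the data FIFO, the 4-byte header to the control
+ * FIFO, and read responses are drained once read data is signalled available.
+ */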
+static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dsi_host->port;
+ struct mipi_dsi_packet packet;
+ ssize_t ret;
+ const u8 *header, *data;
+ u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ header = packet.header;
+ data = packet.payload;
+
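+ /* pick the LP or HS generic FIFO registers based on the message flags */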
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
+ data_reg = MIPI_LP_GEN_DATA(port);
+ data_mask = LP_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_mask = LP_CTRL_FIFO_FULL;
+ } else {
+ data_reg = MIPI_HS_GEN_DATA(port);
+ data_mask = HS_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_mask = HS_CTRL_FIFO_FULL;
+ }
+
+ /* note: this is never true for reads */
+ if (packet.payload_length) {
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ write_data(dev_priv, data_reg, packet.payload,
+ packet.payload_length);
+ }
+
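+ /* for reads, clear any stale read-data-available status first */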
+ if (msg->rx_len)
+ I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+
+ I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+
+ /* ->rx_len is set only for reads */
+ if (msg->rx_len) {
+ data_mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ }
+
+ /* XXX: fix for reads and writes */
+ return 4 + packet.payload_length;
+}
+
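+/*
+ * attach/detach are no-ops here: the DSI devices are created by hand in
+ * intel_dsi_host_init() instead of being registered with the DSI framework.
+ */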
+static int intel_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int intel_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
+ .attach = intel_dsi_host_attach,
+ .detach = intel_dsi_host_detach,
+ .transfer = intel_dsi_host_transfer,
+};
+
+static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = &intel_dsi_host_ops;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
+ enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
+
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
@@ -56,12 +251,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dpio_lock);
}
-static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
-{
- return container_of(intel_attached_encoder(connector),
- struct intel_dsi, base);
-}
-
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
@@ -78,14 +267,13 @@ static void intel_dsi_hot_plug(struct intel_encoder *encoder)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *config)
+ struct intel_crtc_state *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
- struct drm_display_mode *mode = &config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
DRM_DEBUG_KMS("\n");
@@ -95,18 +283,65 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (intel_dsi->dev.dev_ops->mode_fixup)
- return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
- mode, adjusted_mode);
-
return true;
}
+static void intel_dsi_port_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
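+ /* program the pixel overlap count used by front-back dual link mode */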
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ temp = I915_READ(VLV_CHICKEN_3);
+ temp &= ~PIXEL_OVERLAP_CNT_MASK;
+ temp |= intel_dsi->pixel_overlap <<
+ PIXEL_OVERLAP_CNT_SHIFT;
+ I915_WRITE(VLV_CHICKEN_3, temp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ temp &= ~LANE_CONFIGURATION_MASK;
+ temp &= ~DUAL_LINK_MODE_MASK;
+
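+ /* select the dual link mode and route the port to the pipe driving it */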
+ if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+ temp |= (intel_dsi->dual_link - 1)
+ << DUAL_LINK_MODE_SHIFT;
+ temp |= intel_crtc->pipe ?
+ LANE_CONFIGURATION_DUAL_LINK_B :
+ LANE_CONFIGURATION_DUAL_LINK_A;
+ }
+ /* assert ip_tg_enable signal */
+ I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
+static void intel_dsi_port_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
@@ -120,48 +355,51 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
- usleep_range(2500, 3000);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
+ /* Enable the MIPI PHY transparent latch. The bit in the
+ * MIPI Port A register controls both Port A and Port C;
+ * the MIPI Port C register has no equivalent bit.
+ */
+ val = I915_READ(MIPI_PORT_CTRL(PORT_A));
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
- usleep_range(2500, 3000);
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2500, 3000);
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ usleep_range(2500, 3000);
+ }
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
- u32 temp;
+ enum port port;
DRM_DEBUG_KMS("\n");
- if (is_cmd_mode(intel_dsi))
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
- else {
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ } else {
msleep(20); /* XXX */
- dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, TURN_ON, false, port);
msleep(100);
- if (intel_dsi->dev.dev_ops->enable)
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ drm_panel_enable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
- /* assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
- temp = temp | intel_dsi->port_bits;
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
+ intel_dsi_port_enable(encoder);
}
}
@@ -172,6 +410,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -183,7 +422,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
- intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+ intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
tmp = I915_READ(DSPCLK_GATE_D);
@@ -195,13 +434,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (intel_dsi->dev.dev_ops->panel_reset)
- intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+ drm_panel_prepare(intel_dsi->panel);
- if (intel_dsi->dev.dev_ops->send_otp_cmds)
- intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
-
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
/* Enable the port in the pre-enable phase itself because, per hw team
* recommendation, the port should be enabled before plane & pipe */
@@ -221,12 +457,14 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
- dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
msleep(10);
}
}
@@ -235,77 +473,85 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
+ enum port port;
u32 temp;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
- wait_for_dsi_fifo_empty(intel_dsi);
-
- /* de-assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
+ intel_dsi_port_disable(encoder);
msleep(2);
}
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
-
- temp = I915_READ(MIPI_CTRL(pipe));
- temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(pipe), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+ temp = I915_READ(MIPI_CTRL(port));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(port), temp |
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
- temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
- temp &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
+ temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ temp &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ }
/* if disable packets are sent before the shutdown packet, then a
* "send turn on packet" error is observed on some subsequent enable sequence */
- if (intel_dsi->dev.dev_ops->disable)
- intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+ drm_panel_disable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
-
- if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
- == 0x00000), 30))
- DRM_ERROR("DSI LP not going Low\n");
-
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ /* Wait until the clock lanes are in the LP-00 state, checking
+ * MIPI Port A only; MIPI Port C has no equivalent bit.
+ */
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ /* Disable the MIPI PHY transparent latch; the bit in the
+ * MIPI Port A register controls both Port A and Port C.
+ */
+ val = I915_READ(MIPI_PORT_CTRL(PORT_A));
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ usleep_range(2000, 2500);
+ }
vlv_disable_dsi_pll(encoder);
}
@@ -326,8 +572,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
- if (intel_dsi->dev.dev_ops->disable_panel_power)
- intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+ drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
@@ -337,9 +582,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
- u32 port, func;
- enum pipe p;
+ u32 dpi_enabled, func;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -348,13 +595,23 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
return false;
/* XXX: this only works for one DSI output */
- for (p = PIPE_A; p <= PIPE_B; p++) {
- port = I915_READ(MIPI_PORT_CTRL(p));
- func = I915_READ(MIPI_DSI_FUNC_PRG(p));
-
- if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
- if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
- *pipe = p;
+ for_each_dsi_port(port, intel_dsi->ports) {
+ func = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) &
+ DPI_ENABLE;
+
+ /* Due to hardware limitations on BYT, the MIPI Port C DPI
+ * Enable bit does not get set. To check whether DSI Port C
+ * was enabled by the BIOS, check the Pipe B enable bit
+ * instead.
+ */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ (port == PORT_C))
+ dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
+ PIPECONF_ENABLE;
+
+ if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
}
}
@@ -364,7 +621,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 pclk;
DRM_DEBUG_KMS("\n");
@@ -379,7 +636,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (!pclk)
return;
- pipe_config->adjusted_mode.crtc_clock = pclk;
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
@@ -389,7 +646,6 @@ intel_dsi_mode_valid(struct drm_connector *connector,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
@@ -405,7 +661,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+ return MODE_OK;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -437,8 +693,8 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int pipe = intel_crtc->pipe;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ enum port port;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -448,6 +704,15 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
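+ /* in dual link mode each port drives half of the horizontal timings */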
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ hfp /= 2;
+ hsync /= 2;
+ hbp /= 2;
+ }
+
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
@@ -460,18 +725,20 @@ static void set_dsi_timings(struct drm_encoder *encoder,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
- I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(port), hfp);
- /* meaningful for video mode non-burst sync pulse mode only, can be zero
- * for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
- I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+ /* meaningful for video mode non-burst sync pulse mode only,
+ * can be zero for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(port), hbp);
- /* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
- I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(port), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ }
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
@@ -482,33 +749,44 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- int pipe = intel_crtc->pipe;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ &intel_crtc->config->base.adjusted_mode;
+ enum port port;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
u32 val, tmp;
+ u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
- /* escape clock divider, 20MHz, shared for A and C. device ready must be
- * off when doing this! txclkesc? */
- tmp = I915_READ(MIPI_CTRL(0));
- tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+ mode_hdisplay = adjusted_mode->hdisplay;
- /* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(pipe));
- tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
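+ /* halve hdisplay for dual link; front-back mode adds the pixel overlap back */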
+ if (intel_dsi->dual_link) {
+ mode_hdisplay /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ mode_hdisplay += intel_dsi->pixel_overlap;
+ }
- /* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* escape clock divider, 20MHz, shared for A and C.
+ * device ready must be off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
- I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
- I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
- adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
- adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ }
set_dsi_timings(encoder, adjusted_mode);
@@ -522,95 +800,102 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
-
- /* timeouts for recovery. one frame IIUC. if counter expires, EOT and
- * stop state. */
-
- /*
- * In burst mode, value greater than one DPI line Time in byte clock
- * (txbyteclkhs) To timeout this timer 1+ of the above said value is
- * recommended.
- *
- * In non-burst mode, Value greater than one DPI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- *
- * In DBI only mode, value greater than one DBI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- */
-
- if (is_vid_mode(intel_dsi) &&
- intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- } else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->vtotal *
- adjusted_mode->htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- }
- I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
-
- /* dphy stuff */
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
-
- val = 0;
+ tmp = 0;
if (intel_dsi->eotp_pkt == 0)
- val |= EOT_DISABLE;
-
+ tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
- val |= CLOCKSTOP;
-
- /* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
-
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
-
- /* in terms of txbyteclkhs. actual high to low switch +
- * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
- *
- * XXX: write MIPI_STOP_STATE_STALL?
- */
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
- intel_dsi->hs_to_lp_count);
-
- /* XXX: low power clock equivalence in terms of byte clock. the number
- * of byte clocks occupied in one low power clock. based on txbyteclkhs
- * and txclkesc. txclkesc time / txbyteclk time * (105 +
- * MIPI_STOP_STATE_STALL) / 105.???
- */
- I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
-
- /* the bw essential for transmitting 16 long packets containing 252
- * bytes meant for dcs write memory command is programmed in this
- * register in terms of byte clocks. based on dsi transfer rate and the
- * number of lanes configured the time taken to transmit 16 long packets
- * in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
-
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
-
- if (is_vid_mode(intel_dsi))
- /* Some panels might have resolution which is not a multiple of
- * 64 like 1366 x 768. Enable RANDOM resolution support for such
- * panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ tmp |= CLOCKSTOP;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ /* timeouts for recovery. one frame IIUC. if counter expires,
+ * EOT and stop state. */
+
+ /*
+ * In burst mode, the timeout must exceed one DPI line time in
+ * byte clocks (txbyteclkhs); programming 1 more than that
+ * value is recommended.
+ *
+ * In non-burst mode, the timeout must exceed one DPI frame
+ * time in byte clocks (txbyteclkhs); again, 1 more than that
+ * value is recommended.
+ *
+ * In DBI-only mode, the timeout must exceed one DBI frame time
+ * in byte clocks (txbyteclkhs); again, 1 more than that value
+ * is recommended.
+ */
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
+
+ /* XXX: low power clock equivalence in terms of byte clock.
+ * the number of byte clocks occupied in one low power clock.
+ * based on txbyteclkhs and txclkesc.
+ * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
+ * ) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+
+ /* the bw essential for transmitting 16 long packets containing
+ * 252 bytes meant for dcs write memory command is programmed in
+ * this register in terms of byte clocks. based on dsi transfer
+ * rate and the number of lanes configured the time taken to
+ * transmit 16 long packets in a dsi stream varies. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ /* Some panels might have resolution which is not a
+ * multiple of 64 like 1366 x 768. Enable RANDOM
+ * resolution support for such panels by default */
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
+ }
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
@@ -625,20 +910,7 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
- struct intel_encoder *intel_encoder = &intel_dsi->base;
- enum intel_display_power_domain power_domain;
- enum drm_connector_status connector_status;
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
-
- DRM_DEBUG_KMS("\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
-
- intel_display_power_get(dev_priv, power_domain);
- connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
- intel_display_power_put(dev_priv, power_domain);
-
- return connector_status;
+ return connector_status_connected;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -664,7 +936,7 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
-static void intel_dsi_destroy(struct drm_connector *connector)
+static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -674,8 +946,20 @@ static void intel_dsi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ if (intel_dsi->panel) {
+ drm_panel_detach(intel_dsi->panel);
+ /* XXX: Logically this call belongs in the panel driver. */
+ drm_panel_remove(intel_dsi->panel);
+ }
+ intel_encoder_destroy(encoder);
+}
+
static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dsi_encoder_destroy,
};
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
@@ -687,8 +971,10 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
- .destroy = intel_dsi_destroy,
+ .destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -698,9 +984,9 @@ void intel_dsi_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_dsi_device *dsi;
+ enum port port;
unsigned int i;
DRM_DEBUG_KMS("\n");
@@ -748,22 +1034,43 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
- for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
- dsi = &intel_dsi_devices[i];
- intel_dsi->dev = *dsi;
+ /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
+ if (dev_priv->vbt.dsi.config->dual_link) {
+ /* XXX: does dual link work on either pipe? */
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = (1 << PORT_A);
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
+ intel_encoder->crtc_mask = (1 << PIPE_B);
+ intel_dsi->ports = (1 << PORT_C);
+ }
- if (dsi->dev_ops->init(&intel_dsi->dev))
+ /* Create a DSI host (and a device) for each port. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
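+ /* probe the panel drivers until one claims the VBT panel id */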
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
+ intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
+ intel_dsi_drivers[i].panel_id);
+ if (intel_dsi->panel)
break;
}
- if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ if (!intel_dsi->panel) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->crtc_mask = (1 << 0); /* XXX */
-
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -778,13 +1085,23 @@ void intel_dsi_init(struct drm_device *dev)
drm_connector_register(connector);
- fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ drm_panel_attach(intel_dsi->panel, connector);
+
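+ /* use the panel's preferred mode as the fixed panel mode */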
+ mutex_lock(&dev->mode_config.mutex);
+ drm_panel_get_modes(intel_dsi->panel);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 657eb5c1b9d8..2784ac442368 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -26,58 +26,27 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
-struct intel_dsi_device {
- unsigned int panel_id;
- const char *name;
- const struct intel_dsi_dev_ops *dev_ops;
- void *dev_priv;
-};
-
-struct intel_dsi_dev_ops {
- bool (*init)(struct intel_dsi_device *dsi);
-
- void (*panel_reset)(struct intel_dsi_device *dsi);
-
- void (*disable_panel_power)(struct intel_dsi_device *dsi);
-
- /* one time programmable commands if needed */
- void (*send_otp_cmds)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*enable)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*disable)(struct intel_dsi_device *dsi);
-
- int (*mode_valid)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode);
-
- bool (*mode_fixup)(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- void (*mode_set)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE 0
+#define DSI_DUAL_LINK_FRONT_BACK 1
+#define DSI_DUAL_LINK_PIXEL_ALT 2
- bool (*get_hw_state)(struct intel_dsi_device *dev);
-
- struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
-
- void (*destroy) (struct intel_dsi_device *dsi);
-};
+struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
- struct intel_dsi_device dev;
+ struct drm_panel *panel;
+ struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
struct intel_connector *attached_connector;
+ /* bit mask of ports being driven */
+ u16 ports;
+
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -101,6 +70,8 @@ struct intel_dsi {
u8 clock_stop;
u8 escape_clk_div;
+ u8 dual_link;
+ u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
@@ -127,6 +98,24 @@ struct intel_dsi {
u16 panel_pwr_cycle_delay;
};
+struct intel_dsi_host {
+ struct mipi_dsi_host base;
+ struct intel_dsi *intel_dsi;
+ enum port port;
+
+ /* our little hack */
+ struct mipi_dsi_device *device;
+};
+
+static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct intel_dsi_host, base);
+}
+
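+/* iterate over the DSI ports enabled in the given port bitmask */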
+#define for_each_dsi_port(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ if ((__ports_mask) & (1 << (__port)))
+
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
@@ -136,6 +125,6 @@ extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
deleted file mode 100644
index f4767fd2ebeb..000000000000
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula <jani.nikula@intel.com>
- */
-
-#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-
-/*
- * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
- * MIPI_COMMAND_ADDRESS registers.
- *
- * Apparently these registers provide a MIPI adapter level way to send (lots of)
- * commands and data to the receiver, without having to write the commands and
- * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
- *
- * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
- * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
- * framebuffer in command mode displays) these are just an optimization that can
- * come later.
- *
- * For memory writes, these should probably be used for performance.
- */
-
-static void print_stat(struct intel_dsi *intel_dsi)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
-
- val = I915_READ(MIPI_INTR_STAT(pipe));
-
-#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
- DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "\n", pipe, val,
- STAT_BIT(val, TEARING_EFFECT),
- STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
- STAT_BIT(val, GEN_READ_DATA_AVAIL),
- STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, RX_PROT_VIOLATION),
- STAT_BIT(val, RX_INVALID_TX_LENGTH),
- STAT_BIT(val, ACK_WITH_NO_ERROR),
- STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
- STAT_BIT(val, LP_RX_TIMEOUT),
- STAT_BIT(val, HS_TX_TIMEOUT),
- STAT_BIT(val, DPI_FIFO_UNDERRUN),
- STAT_BIT(val, LOW_CONTENTION),
- STAT_BIT(val, HIGH_CONTENTION),
- STAT_BIT(val, TXDSI_VC_ID_INVALID),
- STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
- STAT_BIT(val, TXCHECKSUM_ERROR),
- STAT_BIT(val, TXECC_MULTIBIT_ERROR),
- STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, TXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXDSI_VC_ID_INVALID),
- STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
- STAT_BIT(val, RXCHECKSUM_ERROR),
- STAT_BIT(val, RXECC_MULTIBIT_ERROR),
- STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, RXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
- STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
- STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
- STAT_BIT(val, RXEOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_ERROR));
-#undef STAT_BIT
-}
-
-enum dsi_type {
- DSI_DCS,
- DSI_GENERIC,
-};
-
-/* enable or disable command mode hs transmissions */
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 temp;
- u32 mask = DBI_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
-
- temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
- temp &= DBI_HS_LP_MODE_MASK;
- I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
-
- intel_dsi->hs = enable;
-}
-
-static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, u16 data)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 ctrl_reg;
- u32 ctrl;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
- channel, data_type, data);
-
- if (intel_dsi->hs) {
- ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
- mask = HS_CTRL_FIFO_FULL;
- } else {
- ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
- mask = LP_CTRL_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
- print_stat(intel_dsi);
- }
-
- /*
- * Note: This function is also used for long packets, with length passed
- * as data, since SHORT_PACKET_PARAM_SHIFT ==
- * LONG_PACKET_WORD_COUNT_SHIFT.
- */
- ctrl = data << SHORT_PACKET_PARAM_SHIFT |
- channel << VIRTUAL_CHANNEL_SHIFT |
- data_type << DATA_TYPE_SHIFT;
-
- I915_WRITE(ctrl_reg, ctrl);
-
- return 0;
-}
-
-static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, const u8 *data, int len)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 data_reg;
- int i, j, n;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
- channel, data_type, len);
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
- mask = HS_DATA_FIFO_FULL;
- } else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
- mask = LP_DATA_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
-
- for (i = 0; i < len; i += n) {
- u32 val = 0;
- n = min_t(int, len - i, 4);
-
- for (j = 0; j < n; j++)
- val |= *data++ << 8 * j;
-
- I915_WRITE(data_reg, val);
- /* XXX: check for data fifo full, once that is set, write 4
- * dwords, then wait for not set, then continue. */
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, len);
-}
-
-static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
- int channel, const u8 *data, int len,
- enum dsi_type type)
-{
- int ret;
-
- if (len == 0) {
- BUG_ON(type == DSI_GENERIC);
- ret = dsi_vc_send_short(intel_dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
- 0);
- } else if (len == 1) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0]);
- } else if (len == 2) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- (data[1] << 8) | data[0]);
- } else {
- ret = dsi_vc_send_long(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len);
- }
-
- return ret;
-}
-
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
-}
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
-}
-
-static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
-{
- return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
- dcs_cmd);
-}
-
-static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 *reqdata,
- int reqlen)
-{
- u16 data;
- u8 data_type;
-
- switch (reqlen) {
- case 0:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- break;
- case 1:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- break;
- case 2:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = (reqdata[1] << 8) | reqdata[0];
- break;
- default:
- BUG();
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, data);
-}
-
-static int dsi_read_data_return(struct intel_dsi *intel_dsi,
- u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- int i, len = 0;
- u32 data_reg, val;
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
- } else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
- }
-
- while (len < buflen) {
- val = I915_READ(data_reg);
- for (i = 0; i < 4 && len < buflen; i++, len++)
- buf[len] = val >> 8 * i;
- }
-
- return len;
-}
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
- reqlen);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-/*
- * send a video mode command
- *
- * XXX: commands with data in MIPI_DPI_DATA?
- */
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
-
- /* XXX: pipe, hs */
- if (hs)
- cmd &= ~DPI_LP_MODE;
- else
- cmd |= DPI_LP_MODE;
-
- /* clear bit */
- I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
-
- /* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
- DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
-
- I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
-
- mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
-
- return 0;
-}
-
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
- u32 mask;
-
- mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
- LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
-}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 46aa1acc00eb..886779030f1a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,81 +33,7 @@
#include "intel_drv.h"
#include "intel_dsi.h"
-#define DPI_LP_MODE_EN false
-#define DPI_HS_MODE_EN true
-
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
-
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen);
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen);
-
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
-
-/* XXX: questionable write helpers */
-static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
-{
- return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
-}
-
-static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, u8 param)
-{
- u8 buf[2] = { dcs_cmd, param };
- return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
-}
-
-static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
- int channel)
-{
- return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
-}
-
-static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 param)
-{
- return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
-}
-
-static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2)
-{
- u8 buf[2] = { param1, param2 };
- return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
-}
-
-/* XXX: questionable read helpers */
-static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
- int channel, u8 *buf, int buflen)
-{
- return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
-}
-
-static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
- int channel, u8 param, u8 *buf,
- int buflen)
-{
- return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
-}
-
-static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2,
- u8 *buf, int buflen)
-{
- u8 req[2] = { param1, param2 };
-
- return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
-}
-
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
+ enum port port);
#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index f6bdd44069ce..d2cd8d5b27a1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -28,6 +28,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
@@ -35,7 +36,16 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
+
+struct vbt_panel {
+ struct drm_panel panel;
+ struct intel_dsi *intel_dsi;
+};
+
+static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct vbt_panel, panel);
+}
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
@@ -94,34 +104,59 @@ static struct gpio_table gtable[] = {
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
-static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
+static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
- u8 type, byte, mode, vc, port;
- u16 len;
-
- byte = *data++;
- mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
- vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
- port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+ return port ? PORT_C : PORT_A;
+}
- /* LP or HS mode */
- intel_dsi->hs = mode;
+static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
+ const u8 *data)
+{
+ struct mipi_dsi_device *dsi_device;
+ u8 type, flags, seq_port;
+ u16 len;
+ enum port port;
- /* get packet type and increment the pointer */
+ flags = *data++;
type = *data++;
len = *((u16 *) data);
data += 2;
+ seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
+
+	/* For DSI single link on Port A or C, the seq_port value parsed
+	 * from Sequence Block #53 of the VBT is set to 0. Reads and
+	 * writes of packets for single link DSI on Port A or Port C are
+	 * therefore based on the DVO port from VBT block 2.
+	 */
+ if (intel_dsi->ports == (1 << PORT_C))
+ port = PORT_C;
+ else
+ port = intel_dsi_seq_port_to_port(seq_port);
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ if (!dsi_device) {
+ DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ goto out;
+ }
+
+ if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ else
+ dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- dsi_vc_generic_write_0(intel_dsi, vc);
+ mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- dsi_vc_generic_write_1(intel_dsi, vc, *data);
+ mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
+ mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
@@ -129,30 +164,31 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- dsi_vc_generic_write(intel_dsi, vc, data, len);
+ mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- dsi_vc_dcs_write_0(intel_dsi, vc, *data);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
- dsi_vc_dcs_write(intel_dsi, vc, data, len);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
+out:
data += len;
return data;
}
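
The flags byte decoded at the top of mipi_exec_send_packet() packs the transfer mode, virtual channel and sequence port. A minimal worked decode, assuming the shift values above plus a MIPI_PORT_SHIFT of 3 (that define is not shown in this hunk):

	/* Illustrative decode of an assumed flags byte 0x09 (0b01001): */
	u8 flags = 0x09;
	bool lp = !(flags & 1);         /* bit 0 set -> HS, so LPM cleared */
	u8 vc = (flags >> 1) & 3;       /* bits 2:1 -> virtual channel 0 */
	u8 seq_port = (flags >> 3) & 3; /* bits 4:3 -> 1 -> PORT_C, unless
	                                 * single link on Port C overrides */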
-static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- u32 delay = *((u32 *) data);
+ u32 delay = *((const u32 *) data);
usleep_range(delay, delay + 10);
data += 4;
@@ -160,7 +196,7 @@ static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
u8 gpio, action;
u16 function, pad;
@@ -193,7 +229,8 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
+typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
+ const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
@@ -217,13 +254,12 @@ static const char * const seq_name[] = {
"MIPI_SEQ_DEASSERT_RESET"
};
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
{
- u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
- if (!sequence)
+ if (!data)
return;
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
@@ -256,14 +292,103 @@ static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
}
}
-static bool generic_init(struct intel_dsi_device *dsi)
+static int vbt_panel_prepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_unprepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_enable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_disable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_get_modes(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *mode;
+
+ if (!panel->connector)
+ return 0;
+
+ mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!mode)
+ return 0;
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs vbt_panel_funcs = {
+ .disable = vbt_panel_disable,
+ .unprepare = vbt_panel_unprepare,
+ .prepare = vbt_panel_prepare,
+ .enable = vbt_panel_enable,
+ .get_modes = vbt_panel_get_modes,
+};
+
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ struct vbt_panel *vbt_panel;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
@@ -273,6 +398,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -280,6 +406,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
@@ -299,6 +427,20 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = mode->clock;
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ pclk = pclk / 2;
+
+		/* pixel_overlap can be enabled if the panel needs it; in
+		 * that case the pixel clock must be increased to account
+		 * for the extra pixels.
+		 */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ pclk += DIV_ROUND_UP(mode->vtotal *
+ intel_dsi->pixel_overlap *
+ 60, 1000);
+ }
+ }
+
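
A rough worked instance of the dual link adjustment above, using assumed numbers rather than real VBT data (1920x1200@60, vtotal 1235, pixel_overlap 4):

	u32 pclk = 193250;                         /* kHz, assumed mode clock */
	pclk /= 2;                                 /* 96625: each port gets half */
	pclk += DIV_ROUND_UP(1235 * 4 * 60, 1000); /* +297 kHz for overlap pixels,
	                                            * giving 96922 kHz per port */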
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
@@ -311,7 +453,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
+ return NULL;
}
burst_mode_ratio = DIV_ROUND_UP(
@@ -321,7 +463,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
- return false;
+ return NULL;
}
} else
burst_mode_ratio = 100;
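
For instance, with assumed figures: a VBT target burst DDR frequency of 500 MHz against a computed non-burst DDR frequency of 400 MHz gives

	burst_mode_ratio = DIV_ROUND_UP(500 * 100, 400);   /* = 125 */
	pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100); /* pclk scaled by 1.25 */

so the pixel clock is raised by a quarter to keep the link in burst mode.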
@@ -493,6 +635,12 @@ static bool generic_init(struct intel_dsi_device *dsi)
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ DRM_DEBUG_KMS("Dual link: NONE\n");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
@@ -516,110 +664,18 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
- return true;
-}
-
-static int generic_mode_valid(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static bool generic_mode_fixup(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode) {
- return true;
-}
-
-static void generic_panel_reset(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable_panel_power(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_enable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* This is cheating a bit with the cleanup. */
+ vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ vbt_panel->intel_dsi = intel_dsi;
+ drm_panel_init(&vbt_panel->panel);
+ vbt_panel->panel.funcs = &vbt_panel_funcs;
+ drm_panel_add(&vbt_panel->panel);
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
-{
- return connector_status_connected;
-}
-
-static bool generic_get_hw_state(struct intel_dsi_device *dev)
-{
- return true;
-}
-
-static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+	/* A regular driver would get the DSI device at probe time. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
+ }
- dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
- return dev_priv->vbt.lfp_lvds_vbt_mode;
+ return &vbt_panel->panel;
}
-
-static void generic_destroy(struct intel_dsi_device *dsi) { }
-
-/* Callbacks. We might not need them all. */
-struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
- .init = generic_init,
- .mode_valid = generic_mode_valid,
- .mode_fixup = generic_mode_fixup,
- .panel_reset = generic_panel_reset,
- .disable_panel_power = generic_disable_panel_power,
- .send_otp_cmds = generic_send_otp_cmds,
- .enable = generic_enable,
- .disable = generic_disable,
- .detect = generic_detect,
- .get_hw_state = generic_get_hw_state,
- .get_modes = generic_get_modes,
- .destroy = generic_destroy,
-};
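
For context, a sketch of how the returned drm_panel is meant to be consumed by the encoder paths (the call sites are outside this hunk; the drm_panel_* helpers are the standard drm_panel entry points):

	struct drm_panel *panel = vbt_panel_init(intel_dsi, panel_id);
	if (panel) {
		drm_panel_prepare(panel);   /* ASSERT_RESET + INIT_OTP sequences */
		drm_panel_enable(panel);    /* DISPLAY_ON sequence */
		/* panel in use */
		drm_panel_disable(panel);   /* DISPLAY_OFF sequence */
		drm_panel_unprepare(panel); /* DEASSERT_RESET sequence */
	}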
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index fa7a6ca34cd6..3622d0bafdf8 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -241,7 +241,11 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
return;
}
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+ if (intel_dsi->ports & (1 << PORT_A))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ if (intel_dsi->ports & (1 << PORT_C))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
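
intel_dsi->ports is a bitmask of enum port values, so the clock gating now follows the link configuration; illustratively:

	/* single link on Port A gates only DSI0; dual link gates both PLLs */
	u16 ports = (1 << PORT_A) | (1 << PORT_C);   /* dual link example */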
@@ -269,12 +273,14 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->dpio_lock);
+ if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ DSI_PLL_LOCK, 20)) {
- if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index e40e3df33517..d8579510beb0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -144,7 +145,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -160,9 +161,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -186,8 +187,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config.requested_mode,
- &crtc->config.adjusted_mode);
+ &crtc->config->base.mode,
+ &crtc->config->base.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -200,7 +201,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
- struct intel_crtc_config *config;
+ struct intel_crtc_state *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
@@ -221,7 +222,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
- config = &to_intel_crtc(crtc)->config;
+ config = to_intel_crtc(crtc)->config;
intel_dvo->base.connectors_active = true;
@@ -261,10 +262,10 @@ intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -295,7 +296,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
@@ -390,6 +391,8 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
new file mode 100644
index 000000000000..624d1d92d284
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the memory used by the display. It is totally
+ * transparent to user space and is handled entirely in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns: they come from keeping the memory footprint
+ * small and having fewer memory pages opened and accessed for refreshing
+ * the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and for
+ * configuring its offset in the proper registers. The hardware takes
+ * care of all compression and decompression. However, there are many
+ * known cases where we have to forcibly disable it to allow proper
+ * screen updates.
+ */
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void i8xx_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int i;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
+
+ /* enable it... */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+ cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void g4x_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void snb_fbc_blit_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+
+	/* The blitter is part of the Media power well on VLV. This
+	 * parameter has no impact on other platforms for now. */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+}
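
One reading of the paired writes above (an assumption from the register layout, not spelled out in this patch): each low bit in ECOSKPD is guarded by a lock bit GEN6_BLITTER_LOCK_SHIFT positions up, so the sequence amounts to unlock, set, relock:

	val |= GEN6_BLITTER_FBC_NOTIFY << GEN6_BLITTER_LOCK_SHIFT;    /* unlock */
	val |= GEN6_BLITTER_FBC_NOTIFY;                               /* set */
	val &= ~(GEN6_BLITTER_FBC_NOTIFY << GEN6_BLITTER_LOCK_SHIFT); /* relock */

with each step posted via I915_WRITE(GEN6_BLITTER_ECOSKPD, val), as in the function body.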
+
+static void ilk_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
+
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ snb_fbc_blit_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void ilk_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ilk_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+
+ if (dev_priv->fbc.false_color)
+ dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ } else {
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
+ }
+
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+ snb_fbc_blit_update(dev);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+/**
+ * intel_fbc_enabled - Is FBC enabled?
+ * @dev: the drm_device
+ *
+ * This function is used to verify the current state of FBC.
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->fbc.enabled;
+}
+
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_GEN8(dev))
+ return;
+
+ if (!intel_fbc_enabled(dev))
+ return;
+
+ I915_WRITE(MSG_FBC_REND_STATE, value);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc.fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->primary->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc);
+
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
+ }
+
+ dev_priv->fbc.fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
+		/* the work was cancelled before it could run; clean up */
+ kfree(dev_priv->fbc.fbc_work);
+
+	/* Mark the work as no longer wanted so that if it does
+	 * wake up (because the work was already running and waiting
+	 * for our mutex), it will discover that it is no longer
+	 * necessary to run.
+ */
+ dev_priv->fbc.fbc_work = NULL;
+}
+
+static void intel_fbc_enable(struct drm_crtc *crtc)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
+ dev_priv->display.enable_fbc(crtc);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->primary->fb;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc.fbc_work = work;
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev: the drm_device
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= max_hdisplay in width, max_vdisplay in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
+
+ if (!HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ return;
+ }
+
+ if (!i915.powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ return;
+ }
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ for_each_crtc(dev, tmp_crtc) {
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->primary->fb;
+ obj = intel_fb_obj(fb);
+ adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
+ }
+ if (!i915.enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ goto out_disable;
+ }
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ goto out_disable;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ max_width = 4096;
+ max_height = 2048;
+ } else {
+ max_width = 2048;
+ max_height = 1536;
+ }
+ if (intel_crtc->config->pipe_src_w > max_width ||
+ intel_crtc->config->pipe_src_h > max_height) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ intel_crtc->plane != PLANE_A) {
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ goto out_disable;
+ }
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_fbc_disable(dev);
+ }
+
+ intel_fbc_enable(crtc);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_fbc_disable(dev);
+ }
+ i915_gem_stolen_cleanup_compression(dev);
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
+ return;
+ }
+
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = ilk_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_fbc_enable;
+ dev_priv->display.disable_fbc = g4x_fbc_disable;
+ } else {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_fbc_enable;
+ dev_priv->display.disable_fbc = i8xx_fbc_disable;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ }
+
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+}
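
Callers then reach FBC only through the vfuncs chosen above; a minimal sketch (illustrative, not a call site in this patch):

	/* query and tear down via the per-platform hooks */
	if (dev_priv->display.fbc_enabled(dev_priv->dev))
		dev_priv->display.disable_fbc(dev_priv->dev);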
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 850cf7d6578c..3001a8674611 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -443,7 +443,7 @@ retry:
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
- &to_intel_crtc(encoder->crtc)->config);
+ to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
@@ -531,7 +531,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct intel_plane_config *plane_config = NULL;
+ struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
@@ -581,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -592,13 +592,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
- cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = intel_fb_align_height(dev, cur_size,
+ plane_config->tiling);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config.adjusted_mode.crtc_hdisplay,
- intel_crtc->config.adjusted_mode.crtc_vdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77af512d2d35..04e248dd2259 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -341,7 +341,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
}
/**
- * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
*
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 79f6d72179c5..73cb6e036445 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -157,6 +157,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
intel_psr_invalidate(dev, obj->frontbuffer_bits);
+ intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
}
/**
@@ -182,6 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+ intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
/*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3abc2000fce9..995c5b261f4f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -337,13 +338,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
- intel_crtc->config.cpu_transcoder,
+ intel_crtc->config->cpu_transcoder,
dev_priv);
if (data_reg == 0)
return;
@@ -371,7 +372,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
@@ -436,7 +437,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
}
if (intel_hdmi->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -672,7 +673,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -700,7 +701,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@@ -711,12 +712,12 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (crtc->config.pipe_bpp > 24)
+ if (crtc->config->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (crtc->config.has_hdmi_sink)
+ if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev))
@@ -759,7 +760,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -792,7 +793,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -802,7 +803,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -814,7 +815,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_crtc->config.has_audio)
+ if (intel_crtc->config->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -845,8 +846,8 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (intel_crtc->config.has_audio) {
- WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ if (intel_crtc->config->has_audio) {
+ WARN_ON(!intel_crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -866,7 +867,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -975,12 +976,12 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
@@ -1252,12 +1253,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
}
@@ -1270,7 +1271,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
@@ -1302,7 +1303,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1467,7 +1468,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1593,7 +1594,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1614,7 +1615,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
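The two hooks added to intel_hdmi_connector_funcs above are the standard atomic-helper glue. A minimal sketch of the pattern, with a hypothetical connector (example_connector_funcs is illustrative, not from this patch; the atomic_duplicate_state hook is assumed to be wired up elsewhere in the series):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

/* Hypothetical connector: only the hooks relevant to the atomic
 * conversion are shown; .detect/.destroy etc. are omitted. */
static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_connector_atomic_get_property,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};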
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e588376227ea..0f358c5999ec 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -212,8 +212,7 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better),
- * and only when enabled via module parameter.
+ * support for Logical Ring Contexts and Aliasing PPGTT or better).
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
@@ -284,7 +283,6 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
- unsigned long flags;
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
@@ -298,63 +296,17 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
- /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
- * are in progress.
- *
- * The other problem is that we can't just call gen6_gt_force_wake_get()
- * because that function calls intel_runtime_pm_get(), which might sleep.
- * Instead, we do the runtime_pm_get/put when creating/destroying requests.
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_ALL);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(RING_ELSP(ring), desc[1]);
I915_WRITE(RING_ELSP(ring), desc[0]);
I915_WRITE(RING_ELSP(ring), desc[3]);
+
/* The context is automatically loaded after the following */
I915_WRITE(RING_ELSP(ring), desc[2]);
/* ELSP is a write-only register, so use another nearby reg for posting instead */
POSTING_READ(RING_EXECLIST_STATUS(ring));
-
- /* Release Force Wakeup (see the big comment above). */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_ALL);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
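The block deleted above open-coded per-domain forcewake reference counting, now centralised in intel_uncore_forcewake_get/put. In outline it is the classic refcount-under-spinlock pattern; a minimal sketch with illustrative types (fw_domain is not the driver's struct), sleep-free and therefore callable from atomic context:

#include <linux/spinlock.h>

struct fw_domain {
	spinlock_t lock;
	unsigned int count;
	void (*wake)(struct fw_domain *d);
	void (*sleep)(struct fw_domain *d);
};

static void fw_domain_get(struct fw_domain *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->count++ == 0)
		d->wake(d);	/* first user powers the well up */
	spin_unlock_irqrestore(&d->lock, flags);
}

static void fw_domain_put(struct fw_domain *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (--d->count == 0)
		d->sleep(d);	/* last user lets it sleep again */
	spin_unlock_irqrestore(&d->lock, flags);
}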
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
@@ -405,8 +357,8 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
- struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+ struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
assert_spin_locked(&ring->execlist_lock);
@@ -446,12 +398,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct intel_ctx_submit_request *head_req;
+ struct drm_i915_gem_request *head_req;
assert_spin_locked(&ring->execlist_lock);
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (head_req != NULL) {
@@ -474,13 +426,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
}
/**
- * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
+ * intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
@@ -535,24 +487,34 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
- u32 tail)
+ u32 tail,
+ struct drm_i915_gem_request *request)
{
- struct intel_ctx_submit_request *req = NULL, *cursor;
+ struct drm_i915_gem_request *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
int num_elements = 0;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- return -ENOMEM;
- req->ctx = to;
- i915_gem_context_reference(req->ctx);
-
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
- req->ring = ring;
- req->tail = tail;
+ if (!request) {
+ /*
+ * If there isn't a request associated with this submission,
+ * create one as a temporary holder.
+ */
+ WARN(1, "execlist context submission without request");
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+ request->ring = ring;
+ request->ctx = to;
+ } else {
+ WARN_ON(to != request->ctx);
+ }
+ request->tail = tail;
+ i915_gem_request_reference(request);
+ i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -563,10 +525,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
break;
if (num_elements > 2) {
- struct intel_ctx_submit_request *tail_req;
+ struct drm_i915_gem_request *tail_req;
tail_req = list_last_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (to == tail_req->ctx) {
@@ -578,7 +540,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
}
}
- list_add_tail(&req->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
@@ -587,7 +549,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0;
}
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
uint32_t flush_domains;
@@ -597,7 +560,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->emit_flush(ringbuf, ctx,
+ I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
@@ -606,6 +570,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
}
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -633,7 +598,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf);
+ return logical_ring_invalidate_all_caches(ringbuf, ctx);
}
/**
@@ -713,13 +678,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, vmas);
+ ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -732,7 +697,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
if (ret)
return ret;
@@ -744,7 +709,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_gem_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
@@ -766,9 +731,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(req->ctx);
+ i915_gem_context_unreference(ctx);
list_del(&req->execlist_link);
- kfree(req);
+ i915_gem_request_unreference(req);
}
}
@@ -794,7 +759,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
int ret;
@@ -802,7 +768,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -819,17 +785,18 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
- struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
intel_logical_ring_advance(ringbuf);
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(ring, ctx, ringbuf->tail);
+ execlists_context_queue(ring, ctx, ringbuf->tail, request);
}
static int intel_lr_context_pin(struct intel_engine_cs *ring,
@@ -840,11 +807,11 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (ctx->engine[ring->id].unpin_count++ == 0) {
+ if (ctx->engine[ring->id].pin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
- goto reset_unpin_count;
+ goto reset_pin_count;
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
@@ -855,8 +822,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
-reset_unpin_count:
- ctx->engine[ring->id].unpin_count = 0;
+reset_pin_count:
+ ctx->engine[ring->id].pin_count = 0;
return ret;
}
@@ -869,47 +836,55 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--ctx->engine[ring->id].unpin_count == 0) {
+ if (--ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int logical_ring_alloc_request(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
int ret;
- if (ring->outstanding_lazy_seqno)
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
-
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- if (ctx != ring->default_context) {
- ret = intel_lr_context_pin(ring, ctx);
- if (ret) {
- kfree(request);
- return ret;
- }
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
}
+ }
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ intel_lr_context_unpin(ring, ctx);
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ /* Hold a reference to the context this request belongs to
+ * (we will need it when the time comes to emit/retire the
+ * request).
+ */
+ request->ctx = ctx;
+ i915_gem_context_reference(request->ctx);
+
+ ring->outstanding_lazy_request = request;
+ return 0;
}
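With requests now kref-counted (the kref_init above), i915_gem_request_reference/unreference are assumed to reduce to the stock kref helpers; a minimal sketch with a simplified request type:

#include <linux/kref.h>
#include <linux/slab.h>

struct request {
	struct kref ref;
	/* seqno, ring, ctx, ... elided */
};

static void request_free(struct kref *ref)
{
	kfree(container_of(ref, struct request, ref));
}

static inline void request_get(struct request *rq)
{
	kref_get(&rq->ref);
}

static inline void request_put(struct request *rq)
{
	kref_put(&rq->ref, request_free);
}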
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
@@ -917,42 +892,42 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= bytes)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
+ /*
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
+ */
+ struct intel_context *ctx = request->ctx;
+ if (ctx->engine[ring->id].ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
- return 0;
+ return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
}
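The &request->list == &ring->request_list test above exploits a list_for_each_entry() property: when the loop finishes without a break, the cursor's embedded list_head is the list anchor itself. A self-contained illustration with illustrative types:

#include <linux/list.h>

struct item {
	int key;
	struct list_head link;
};

static struct item *find_key(struct list_head *items, int key)
{
	struct item *it;

	list_for_each_entry(it, items, link)
		if (it->key == key)
			break;

	/* Loop ran off the end: cursor points back at the anchor. */
	if (&it->link == items)
		return NULL;
	return it;
}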
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -966,7 +941,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
/* Force the context submission in case we have been skipping it */
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
@@ -975,13 +950,10 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
do {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= bytes)
break;
- }
msleep(1);
@@ -1004,13 +976,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
}
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, rem);
+ int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
if (ret)
return ret;
@@ -1022,23 +995,24 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int bytes)
{
int ret;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf);
+ ret = logical_ring_wrap_buffer(ringbuf, ctx);
if (unlikely(ret))
return ret;
}
if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, bytes);
+ ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
if (unlikely(ret))
return ret;
}
@@ -1059,7 +1033,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int num_dwords)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
@@ -1071,12 +1046,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
if (ret)
return ret;
- ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+ ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
/* Preallocate the olr before touching the ring */
- ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+ ret = logical_ring_alloc_request(ring, ctx);
if (ret)
return ret;
@@ -1093,15 +1068,15 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
- ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
if (ret)
return ret;
@@ -1115,7 +1090,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
@@ -1134,6 +1109,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
+ ring->next_context_status_buffer = 0;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1159,22 +1135,19 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
-
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags)
{
bool ppgtt = !(flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -1222,6 +1195,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
}
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 unused)
{
@@ -1231,21 +1205,23 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
cmd = MI_FLUSH_DW + 1;
- if (ring == &dev_priv->ring[VCS]) {
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
- MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
- } else {
- if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (ring == &dev_priv->ring[VCS])
+ cmd |= MI_INVALIDATE_BSD;
}
intel_logical_ring_emit(ringbuf, cmd);
@@ -1260,6 +1236,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
}
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1286,7 +1263,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 6);
if (ret)
return ret;
@@ -1311,17 +1288,18 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
+static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
if (ret)
return ret;
- cmd = MI_STORE_DWORD_IMM_GEN8;
+ cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
intel_logical_ring_emit(ringbuf, cmd);
@@ -1329,14 +1307,27 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_logical_ring_emit(ringbuf,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
return 0;
}
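The commands emitted above form the usual breadcrumb: MI_STORE_DWORD_IMM writes the seqno into the status page, then MI_USER_INTERRUPT wakes any waiters. The CPU-side completion test is then a wraparound-safe comparison in the style of i915_seqno_passed(); a sketch where hws_page and hws_index stand in for the real status-page plumbing:

#include <linux/types.h>

/* Wraparound-safe: true once the status page has advanced to or past
 * the request's seqno, even across u32 overflow. */
static bool request_completed(const volatile u32 *hws_page,
			      unsigned int hws_index, u32 seqno)
{
	return (s32)(hws_page[hws_index] - seqno) >= 0;
}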
+static int gen8_init_rcs_context(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_logical_ring_workarounds_emit(ring, ctx);
+ if (ret)
+ return ret;
+
+ return intel_lr_context_render_state_init(ring, ctx);
+}
+
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
@@ -1354,8 +1345,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1383,18 +1373,11 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
- ring->next_context_status_buffer = 0;
ret = i915_cmd_parser_init_ring(ring);
if (ret)
return ret;
- if (ring->init) {
- ret = ring->init(ring);
- if (ret)
- return ret;
- }
-
ret = intel_lr_context_deferred_create(ring->default_context, ring);
return ret;
@@ -1404,6 +1387,7 @@ static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1415,8 +1399,8 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init = gen8_init_render_ring;
- ring->init_context = intel_logical_ring_workarounds_emit;
+ ring->init_hw = gen8_init_render_ring;
+ ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1426,7 +1410,12 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
- return logical_ring_init(dev, ring);
+ ring->dev = dev;
+ ret = logical_ring_init(dev, ring);
+ if (ret)
+ return ret;
+
+ return intel_init_pipe_control(ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1442,7 +1431,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1467,7 +1456,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1492,7 +1481,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1517,7 +1506,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1609,6 +1598,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
return 0;
ret = ring->emit_bb_start(ringbuf,
+ ctx,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
@@ -1616,7 +1606,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, file, so.obj, NULL);
+ ret = __i915_add_request(ring, file, so.obj);
/* __i915_add_request() moves the object to the inactive list if it
* fails */
out:
@@ -1763,6 +1753,7 @@ void intel_lr_context_free(struct intel_context *ctx)
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
+ WARN_ON(ctx->engine[ring->id].pin_count);
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
@@ -1835,8 +1826,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- if (ctx->engine[ring->id].state)
- return 0;
+ WARN_ON(ctx->engine[ring->id].state);
context_size = round_up(get_lr_context_size(ring), 4096);
@@ -1866,14 +1856,13 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
}
ringbuf->ring = ring;
- ringbuf->FIXME_lrc_ctx = ctx;
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
- ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
@@ -1907,21 +1896,17 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
-
- if (ring->id == RCS && !ctx->rcs_initialized) {
+ else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
- if (ret)
+ if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
+ ctx->engine[ring->id].ringbuf = NULL;
+ ctx->engine[ring->id].state = NULL;
+ goto error;
+ }
}
- ret = intel_lr_context_render_state_init(ring, ctx);
- if (ret) {
- DRM_ERROR("Init render state failed: %d\n", ret);
- ctx->engine[ring->id].ringbuf = NULL;
- ctx->engine[ring->id].state = NULL;
- goto error;
- }
ctx->rcs_initialized = true;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 14b216b9be7f..6f2d7da594f6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -38,8 +38,12 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx);
+void intel_logical_ring_advance_and_submit(
+ struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -61,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ int num_dwords);
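Taken together, the helpers follow the same begin/emit/advance discipline as the legacy ring, as seen at the call sites in intel_lrc.c above; a fragment for orientation, with ret, ringbuf and ctx taken from the surrounding function:

	ret = intel_logical_ring_begin(ringbuf, ctx, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);			/* commit the new tail */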
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
@@ -83,36 +89,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
-/**
- * struct intel_ctx_submit_request - queued context submission request
- * @ctx: Context to submit to the ELSP.
- * @ring: Engine to submit it to.
- * @tail: how far in the context's ringbuffer this request goes to.
- * @execlist_link: link in the submission queue.
- * @work: workqueue for processing this request in a bottom half.
- * @elsp_submitted: no. of times this request has been sent to the ELSP.
- *
- * The ELSP only accepts two elements at a time, so we queue context/tail
- * pairs on a given queue (ring->execlist_queue) until the hardware is
- * available. The queue serves a double purpose: we also use it to keep track
- * of the up to 2 contexts currently in the hardware (usually one in execution
- * and the other queued up by the GPU): We only remove elements from the head
- * of the queue when the hardware informs us that an element has been
- * completed.
- *
- * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
- */
-struct intel_ctx_submit_request {
- struct intel_context *ctx;
- struct intel_engine_cs *ring;
- u32 tail;
-
- struct list_head execlist_link;
-
- int elsp_submitted;
-};
-
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
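The deleted kernel-doc above remains the clearest description of the ELSP queue model: two hardware ports, with consecutive submissions of the same context coalesced. A hedged sketch of that dequeue policy, using simplified types that are not the driver's:

#include <stddef.h>

struct submit {
	int ctx_id;
	unsigned int tail;
	struct submit *next;
};

/* Pick up to two elements for the ELSP ports: queued submissions for
 * the same context as port 0 are coalesced by advancing its tail; the
 * first different context becomes port 1. */
static void elsp_pick_pair(struct submit *queue,
			   struct submit **port0, struct submit **port1)
{
	struct submit *cursor;

	*port0 = queue;
	*port1 = NULL;
	if (!queue)
		return;
	for (cursor = queue->next; cursor; cursor = cursor->next) {
		if (cursor->ctx_id == (*port0)->ctx_id) {
			(*port0)->tail = cursor->tail;	/* coalesce */
		} else {
			*port1 = cursor;
			break;
		}
	}
}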
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 14654d628ca4..071b96d6e146 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -93,7 +94,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -115,7 +116,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
@@ -129,7 +130,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -139,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
+ &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -167,7 +168,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config->gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -190,7 +191,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (crtc->config.dither && crtc->config.pipe_bpp == 18)
+ if (crtc->config->dither && crtc->config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -277,14 +278,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
unsigned int lvds_bpp;
@@ -531,7 +532,9 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index dc2f4f26c961..f93dfc174495 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -182,7 +182,7 @@ struct intel_overlay {
u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
/* flip handling */
- uint32_t last_flip_req;
+ struct drm_i915_gem_request *last_flip_req;
void (*flip_tail)(struct intel_overlay *);
};
@@ -217,17 +217,19 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(ring, &overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ ret = i915_add_request(ring);
if (ret)
return ret;
overlay->flip_tail = tail;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
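i915_gem_request_assign(), used throughout this hunk, is assumed to be the usual reference-safe pointer swap; a minimal sketch with a simplified request type:

#include <linux/kref.h>
#include <linux/slab.h>

struct request { struct kref ref; };

static void request_free(struct kref *ref)
{
	kfree(container_of(ref, struct request, ref));
}

/* Take the new reference before dropping the old one, so that
 * assigning a pointer to itself is harmless. */
static inline void request_assign(struct request **slot, struct request *rq)
{
	if (rq)
		kref_get(&rq->ref);
	if (*slot)
		kref_put(&(*slot)->ref, request_free);
	*slot = rq;
}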
@@ -286,7 +288,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
- return i915_add_request(ring, &overlay->last_flip_req);
+ WARN_ON(overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ return i915_add_request(ring);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -361,23 +366,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- if (overlay->last_flip_req == 0)
+ if (overlay->last_flip_req == NULL)
return 0;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
@@ -392,6 +394,8 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
@@ -422,6 +426,22 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return 0;
}
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay = dev_priv->overlay;
+
+ if (!overlay)
+ return;
+
+ intel_overlay_release_old_vid(overlay);
+
+ overlay->last_flip_req = NULL;
+ overlay->old_xscale = 0;
+ overlay->old_yscale = 0;
+ overlay->crtc = NULL;
+ overlay->active = false;
+}
+
struct put_image_params {
int format;
short dst_x;
@@ -836,7 +856,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
return -EINVAL;
/* can't use the overlay with double wide pipe */
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..d8686ce89160 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -98,13 +98,13 @@ intel_find_panel_downclock(struct drm_device *dev,
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_display_mode *adjusted_mode;
int x, y, width, height;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
x = y = width = height = 0;
@@ -223,10 +223,10 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
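A worked instance of the fixed-point idiom above, assuming the ACCURACY of 12 (FACTOR = 4096) defined alongside panel_fitter_scaling() in this file; adding FACTOR/2 before the truncating divide is the standard round-to-nearest step:

#include <linux/types.h>

#define ACCURACY 12
#define FACTOR (1u << ACCURACY)		/* 20.12 fixed point: 4096 */

static u32 fp_scale(u32 source, u32 target)
{
	u32 ratio = source * FACTOR / target;	/* truncating divide */

	/* e.g. source=1024, target=1280: 0.8 is 3276.8 in 20.12 fixed
	 * point, so ratio = 4194304/1280 = 3276; the +FACTOR/2 below
	 * rounds the final divide to nearest instead of down. */
	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}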
-static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -243,11 +243,11 @@ static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
-static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -301,14 +301,14 @@ static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
}
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 964b28e3c630..24d77ddcc5f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,17 +52,6 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-/* FBC, or Frame Buffer Compression, is a technique employed to compress the
- * framebuffer contents in-memory, aiming at reducing the required bandwidth
- * during in-memory transfers and, therefore, reducing power consumption.
- *
- * The benefits of FBC are mostly visible with solid backgrounds and
- * variation-less patterns.
- *
- * FBC-related functionality can be enabled by the means of the
- * i915.i915_enable_fbc parameter
- */
-
static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -87,614 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int i;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev))
- cfb_pitch = (cfb_pitch / 32) - 1;
- else
- cfb_pitch = (cfb_pitch / 64) - 1;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- if (IS_GEN4(dev)) {
- u32 fbc_ctl2;
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
- }
-
- /* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
- fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * this param on other platforms for now */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
- dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev))
- dpfc_ctl |= obj->fence_reg;
-
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void gen7_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
-
- dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-
- if (dev_priv->fbc.false_color)
- dpfc_ctl |= FBC_CTL_FALSE_COLOR;
-
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_IVYBRIDGE(dev)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- } else {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
- HSW_FBCQ_DIS);
- }
-
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-
- sandybridge_blit_fbc_update(dev);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->fbc.enabled;
-}
-
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc.fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->primary->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc);
-
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
- dev_priv->fbc.y = work->crtc->y;
- }
-
- dev_priv->fbc.fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc.fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc.fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc.fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that it is no longer
- * necessary to run.
- */
- dev_priv->fbc.fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL) {
- DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->primary->fb;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc.fbc_work = work;
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
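intel_enable_fbc() above (relocated to intel_fbc.c by this series, per the diffstat) is an instance of the deferred-enable pattern, where the 50 ms delay stands in for "at least one vblank since disabling" (WaFbcWaitForVBlankBeforeEnable). A hedged sketch with illustrative names:

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_enable {
	struct delayed_work work;
	void (*enable)(void *hw);	/* illustrative hw callback */
	void *hw;
};

static void deferred_enable_fn(struct work_struct *w)
{
	struct deferred_enable *de =
		container_of(to_delayed_work(w), struct deferred_enable, work);

	/* The real code re-checks that the target is still current
	 * before enabling, since the world may have changed during
	 * the delay; elided here. */
	de->enable(de->hw);
	kfree(de);
}

static int deferred_enable_schedule(void (*enable)(void *), void *hw)
{
	struct deferred_enable *de = kzalloc(sizeof(*de), GFP_KERNEL);

	if (!de)
		return -ENOMEM;
	de->enable = enable;
	de->hw = hw;
	INIT_DELAYED_WORK(&de->work, deferred_enable_fn);
	schedule_delayed_work(&de->work, msecs_to_jiffies(50));
	return 0;
}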
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
-}
-
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
- enum no_fbc_reason reason)
-{
- if (dev_priv->fbc.no_fbc_reason == reason)
- return false;
-
- dev_priv->fbc.no_fbc_reason = reason;
- return true;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel multiply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= max_hdisplay in width, max_vdisplay in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- const struct drm_display_mode *adjusted_mode;
- unsigned int max_width, max_height;
-
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
- return;
- }
-
- if (!i915.powersave) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
- }
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->primary->fb;
- obj = intel_fb_obj(fb);
- adjusted_mode = &intel_crtc->config.adjusted_mode;
-
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
- if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- goto out_disable;
- }
-
- if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
- max_width = 4096;
- max_height = 4096;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- max_width = 4096;
- max_height = 2048;
- } else {
- max_width = 2048;
- max_height = 1536;
- }
- if (intel_crtc->config.pipe_src_w > max_width ||
- intel_crtc->config.pipe_src_h > max_height) {
- if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- goto out_disable;
- }
- if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
- intel_crtc->plane != PLANE_A) {
- if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not A, disabling compression\n");
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- goto out_disable;
- }
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- if (i915_gem_stolen_setup_compression(dev, obj->base.size,
- drm_format_plane_cpp(fb->pixel_format, 0))) {
- if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- goto out_disable;
- }
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->fbc.plane == intel_crtc->plane &&
- dev_priv->fbc.fb_id == fb->base.id &&
- dev_priv->fbc.y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc);
- dev_priv->fbc.no_fbc_reason = FBC_OK;
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
- i915_gem_stolen_cleanup_compression(dev);
-}
-
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1157,7 +538,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -1226,10 +607,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
@@ -1313,10 +694,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1347,7 +728,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
int entries;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
@@ -1677,10 +1058,10 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(crtc)->config.adjusted_mode;
+ &to_intel_crtc(crtc)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1762,7 +1143,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1784,7 +1165,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1823,10 +1204,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(enabled)->config.adjusted_mode;
+ &to_intel_crtc(enabled)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1879,7 +1260,7 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
@@ -1898,17 +1279,17 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
+ pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- if (intel_crtc->config.pch_pfit.enabled) {
+ if (intel_crtc->config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
+ uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
- pipe_w = intel_crtc->config.pipe_src_w;
- pipe_h = intel_crtc->config.pipe_src_h;
+ pipe_w = intel_crtc->config->pipe_src_w;
+ pipe_h = intel_crtc->config->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -2261,7 +1642,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
if (!intel_crtc_active(crtc))
@@ -2521,11 +1902,11 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
return;
p->active = true;
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
@@ -3174,10 +2555,10 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
/* TODO: Take into account the scalers once we support them */
- return config->adjusted_mode.crtc_clock;
+ return config->base.adjusted_mode.crtc_clock;
}
/*
@@ -3265,8 +2646,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->active = intel_crtc_active(crtc);
if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
- p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
/*
* For now, assume primary and cursor planes are always enabled.
@@ -3274,8 +2655,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel =
crtc->primary->fb->bits_per_pixel / 8;
- p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
- p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+ p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4;
@@ -3286,7 +2667,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
+ if (intel_plane->pipe == pipe &&
+ plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
@@ -3621,9 +3003,8 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
+ reallocated[pipe] = true;
}
-
- reallocated[pipe] = true;
}
/*
@@ -4363,16 +3744,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
-	/* IVB and SNB hard-hang on a looping batchbuffer
-	 * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
- mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (IS_GEN8(dev_priv->dev))
- mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return ~mask;
+ return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
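The two special cases removed above move into the new gen6_sanitize_rps_pm_mask() helper, which vlv_set_rps_idle() below also reuses. A minimal sketch of that helper, reconstructed from the removed logic (an assumption, not quoted from the patch); note the inverted polarity: it operates on the final ~mask value, so it clears the bits the removed code OR'd in before inversion:

static u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv,
				     u32 mask)
{
	/* IVB and SNB hard-hang on a looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked, so keep it unmasked. */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	/* Mirrors the removed mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP,
	 * which was likewise applied before the inversion. */
	if (IS_GEN8(dev_priv->dev))
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}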
/* gen6_set_rps is called to update the frequency request, but should also be
@@ -4427,8 +3799,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- /* Latest VLV doesn't need to force the gfx clock */
- if (dev->pdev->revision >= 0xd) {
+ /* CHV and latest VLV don't need to force the gfx clock */
+ if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
return;
}
@@ -4441,7 +3813,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
	/* Mask turbo interrupts so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
vlv_force_gfx_clock(dev_priv, true);
@@ -4466,9 +3839,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -4510,7 +3881,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -4518,6 +3889,7 @@ static void gen9_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN9_PG_ENABLE, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -4541,11 +3913,11 @@ static void valleyview_disable_rps(struct drm_device *dev)
	/* We're doing forcewake before disabling RC6;
	 * this is what the BIOS expects when going into suspend */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -4633,7 +4005,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
&ddcc_status);
if (0 == ret)
dev_priv->rps.efficient_freq =
- (ddcc_status >> 8) & 0xff;
+ clamp_t(u8,
+ ((ddcc_status >> 8) & 0xff),
+ dev_priv->rps.min_freq,
+ dev_priv->rps.max_freq);
}
/* Preserve min/max settings in case of re-init */
@@ -4651,9 +4026,39 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
}
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ gen6_init_rps_frequencies(dev);
+
+ I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
+ I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
+ I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
+ I915_WRITE(GEN6_PMINTRMSK, 0x6);
+ I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ gen6_enable_rps_interrupts(dev);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen9_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0;
int unused;
@@ -4663,7 +4068,7 @@ static void gen9_enable_rps(struct drm_device *dev)
/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, the BIOS may have. */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4677,6 +4082,10 @@ static void gen9_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+ /* 2c: Program Coarse Power Gating Policies. */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
@@ -4686,7 +4095,10 @@ static void gen9_enable_rps(struct drm_device *dev)
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled */
+ I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 3 : 0);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4702,7 +4114,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, the BIOS may have. */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4769,7 +4181,7 @@ static void gen8_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4797,7 +4209,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
@@ -4877,7 +4289,7 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
@@ -4964,11 +4376,35 @@ void gen6_update_ring_freq(struct drm_device *dev)
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ switch (INTEL_INFO(dev)->eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
+ break;
+ case 12:
+ /* (2 * 6) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+			/* Use the (2 * 8) RP0 value for any other combination */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
+ break;
+ }
+ rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ }
return rp0;
}
@@ -4984,20 +4420,36 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp1;
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
-
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK);
+ }
return rp1;
}
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rpn;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
+ rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
+ FB_GFX_FREQ_FUSE_MASK);
+ } else { /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
+ }
+
return rpn;
}
@@ -5168,22 +4620,22 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
/* Preserve min/max settings in case of re-init */
@@ -5237,22 +4689,22 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
WARN_ONCE((dev_priv->rps.max_freq |
@@ -5296,7 +4748,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, the BIOS may have. */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2a: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -5307,7 +4762,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+	/* TO threshold set to 1750 us (0x557 * 1.28 us) */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -5321,11 +4777,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
/* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
@@ -5333,14 +4790,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* WaDisablePwrmtrEvent:chv (pre-production hw) */
- I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
- I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
@@ -5355,16 +4808,16 @@ static void cherryview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
@@ -5385,15 +4838,18 @@ static void valleyview_enable_rps(struct drm_device *dev)
}
/* If VLV, Forcewake all wells, else re-direct to regular path */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -5436,16 +4892,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5681,146 +5137,27 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
+ const int vd = _pxvid_to_vd(pxvid);
+ const int vm = vd - 1125;
+
if (INTEL_INFO(dev)->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
+ return vm > 0 ? vm : 0;
+
+ return vd;
}
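A quick check that the closed-form replacement reproduces the deleted lookup table (values in the table's ".1 mil" units):

/*
 *   pxvid = 7  -> vd = (7 + 2) * 125 = 1125,  vm = 1125 - 1125 = 0
 *   pxvid = 20 -> clamped to 31 -> vd = 33 * 125 = 4125, vm = 3000
 *   pxvid = 40 -> vd = (40 + 2) * 125 = 5250, vm = 4125
 *
 * each matching the corresponding { vd, vm } entry of the removed
 * v_table[]; the "vm > 0 ? vm : 0" clamp covers the table's leading
 * zero-vm rows.
 */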
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -6272,7 +5609,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rc6(dev);
gen9_enable_rps(dev);
+ __gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -6718,6 +6057,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+ /* WaSampleCChickenBitEnable:hsw */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -6888,6 +6231,17 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
+	 * BSpec recommends 8x4 when MSAA is used;
+	 * however, in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
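For reference, the masked-register idiom used here (and in the haswell function above):

/*
 * GEN7_GT_MODE is a "masked" register: the upper 16 bits of the write
 * select which of the lower 16 bits take effect. _MASKED_FIELD(mask,
 * value) builds such a word, so only the WIZ hashing field is updated
 * and the remaining mode bits are left untouched.
 */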
+ /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -7051,43 +6405,12 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-static void intel_init_fbc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_FBC(dev_priv)) {
- dev_priv->fbc.enabled = false;
- return;
- }
-
- if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev_priv)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
-
- dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
-}
-
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_init_fbc(dev_priv);
+ intel_fbc_init(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
@@ -7293,28 +6616,24 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_gpu_freq(dev_priv, val);
+ return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_gpu_freq(dev_priv, val);
-
- return ret;
+ return byt_gpu_freq(dev_priv, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_freq_opcode(dev_priv, val);
+ return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_freq_opcode(dev_priv, val);
-
- return ret;
+ return byt_freq_opcode(dev_priv, val);
+ else
+ return val / GT_FREQUENCY_MULTIPLIER;
}
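A usage sketch of the renamed conversion helpers, assuming the conventional non-VLV GT_FREQUENCY_MULTIPLIER of 50 MHz per opcode unit:

/*
 *   intel_freq_opcode(dev_priv, 1000)  ->  1000 / 50 = opcode 20
 *   intel_gpu_freq(dev_priv, 20)       ->  20 * 50   = 1000 MHz
 *
 * On VLV/CHV the same calls route through the czclk-derived byt_/chv_
 * helpers above; either way, callers no longer have to handle the old
 * -1 "unsupported platform" return.
 */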
void intel_pm_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 716b8a961eea..b9f40c2e0af7 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -61,14 +61,15 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
-bool intel_psr_is_enabled(struct drm_device *dev)
+static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ val = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
@@ -78,8 +79,8 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
@@ -100,7 +101,23 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t val;
+
+	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
+ val = I915_READ(VLV_VSCSDP(pipe));
+ val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+ val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+ I915_WRITE(VLV_VSCSDP(pipe), val);
+}
+
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
@@ -113,14 +130,20 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+}
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
+ uint32_t aux_data_reg, aux_ctl_reg;
int precharge = 0x3;
- bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -134,49 +157,96 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+ if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+
+ aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
+ aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ I915_WRITE(aux_data_reg + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ if (INTEL_INFO(dev)->gen >= 9) {
+ uint32_t val;
+
+ val = I915_READ(aux_ctl_reg);
+ val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
+ val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
+ val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
+ val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ /* Use hardcoded data values for PSR */
+ val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+ I915_WRITE(aux_ctl_reg, val);
+ } else {
+ I915_WRITE(aux_ctl_reg,
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+ }
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp)
+static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
+ I915_WRITE(VLV_PSRCTL(pipe),
+ VLV_EDP_PSR_MODE_SW_TIMER |
+ VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
+ VLV_EDP_PSR_ENABLE);
+}
+
+static void vlv_psr_activate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+	/* Let's do the transition from PSR_state 1 to PSR_state 2,
+	 * i.e. PSR transition to active - static frame transmission.
+	 * The hardware is then responsible for the transition to
+	 * PSR_state 3, i.e. PSR active - no Remote Frame Buffer (RFB)
+	 * update.
+	 */
+ I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
+ VLV_EDP_PSR_ACTIVE_ENTRY);
+}
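For orientation, the PSR_state numbering these VLV comments use, collected from the comments in this patch (the live state is read back through VLV_PSRSTAT):

/*
 *   PSR_state 0 - PSR disabled
 *   PSR_state 1 - PSR inactive (enabled and armed by vlv_psr_enable_source)
 *   PSR_state 2 - PSR active, static frame transmission
 *   PSR_state 3 - PSR active, no Remote Frame Buffer (RFB) update
 *   PSR_state 4 - PSR active, single frame update (skipped on exit)
 *   PSR_state 5 - PSR exit; hardware then returns to PSR_state 1
 */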
+
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
+	/* It was recently identified that, depending on the panel, the
+	 * idle frame count calculated by the hardware can be off by one.
+	 * So use the VBT value + 1, and at minimum 2, to be on the
+	 * safe side.
+	 */
+ uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
+ dev_priv->vbt.psr.idle_frames + 1 : 2;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- bool only_standby = false;
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+ if (dev_priv->psr.link_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -211,27 +281,24 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- /* Below limitations aren't valid for Broadwell */
- if (IS_BROADWELL(dev))
- goto out;
-
- if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
- S3D_ENABLE) {
+ if (IS_HASWELL(dev) &&
+ I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+ S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (IS_HASWELL(dev) &&
+ intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
- out:
dev_priv->psr.source_ok = true;
return true;
}
-static void intel_psr_do_enable(struct intel_dp *intel_dp)
+static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -242,7 +309,14 @@ static void intel_psr_do_enable(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- intel_psr_enable_source(intel_dp);
+ if (HAS_DDI(dev))
+		/* On HSW+, once PSR is enabled on the source it activates
+		 * as soon as the configured idle_frame count is matched.
+		 * So we only actually enable it here, at activation time.
+		 */
+ hsw_psr_enable_source(intel_dp);
+ else
+ vlv_psr_activate(intel_dp);
dev_priv->psr.active = true;
}
@@ -278,39 +352,79 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
+	/* First we check the VBT, but we must respect the known sink
+	 * and source restrictions */
+ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+ if ((intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ||
+ (IS_BROADWELL(dev) && intel_dig_port->port != PORT_A))
+ dev_priv->psr.link_standby = true;
+
dev_priv->psr.busy_frontbuffer_bits = 0;
- intel_psr_setup_vsc(intel_dp);
+ if (HAS_DDI(dev)) {
+ hsw_psr_setup_vsc(intel_dp);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ /* Enable PSR on the panel */
+ hsw_psr_enable_sink(intel_dp);
- /* Enable PSR on the panel */
- intel_psr_enable_sink(intel_dp);
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_psr_activate(intel_dp);
+ } else {
+ vlv_psr_setup_vsc(intel_dp);
+
+ /* Enable PSR on the panel */
+ vlv_psr_enable_sink(intel_dp);
+
+		/* On HSW+ enable_source also means entering the PSR
+		 * entry/active state as soon as the idle_frame count is
+		 * reached, which here would be too soon. On VLV, however,
+		 * enable_source just enables PSR and leaves it in the
+		 * inactive state, so we must do this prior to the active
+		 * transition, i.e. here.
+		 */
+ vlv_psr_enable_source(intel_dp);
+ }
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
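In summary, the two enable sequences intel_psr_enable() now dispatches between, reconstructed from the code above:

/*
 * HSW+ (HAS_DDI): hsw_psr_setup_vsc() -> mask memup/hpd ->
 *                 hsw_psr_enable_sink() -> intel_psr_activate()
 *                 immediately on gen9+, otherwise deferred to the
 *                 delayed intel_psr_work().
 * VLV/CHV:        vlv_psr_setup_vsc() -> vlv_psr_enable_sink() ->
 *                 vlv_psr_enable_source(), leaving PSR armed but
 *                 inactive until vlv_psr_activate() runs.
 */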
-/**
- * intel_psr_disable - Disable PSR
- * @intel_dp: Intel DP
- *
- * This function needs to be called before disabling pipe.
- */
-void intel_psr_disable(struct intel_dp *intel_dp)
+static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t val;
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
+ if (dev_priv->psr.active) {
+ /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
+ if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1))
+ WARN(1, "PSR transition took longer than expected\n");
+
+ val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ val &= ~VLV_EDP_PSR_ENABLE;
+ val &= ~VLV_EDP_PSR_MODE_MASK;
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
}
+}
+
+static void hsw_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
@@ -325,6 +439,30 @@ void intel_psr_disable(struct intel_dp *intel_dp)
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ if (HAS_DDI(dev))
+ hsw_psr_disable(intel_dp);
+ else
+ vlv_psr_disable(intel_dp);
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
@@ -337,18 +475,27 @@ static void intel_psr_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
	/* We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full
	 * enable/disable cycle. PSR might take some time to get
	 * fully disabled and be ready for re-enable.
	 */
- if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
+ if (HAS_DDI(dev_priv->dev)) {
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+ } else {
+ if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
}
-
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
@@ -363,7 +510,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_do_enable(intel_dp);
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@@ -371,17 +518,47 @@ unlock:
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 val;
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
+ if (!dev_priv->psr.active)
+ return;
+
+ if (HAS_DDI(dev)) {
+ val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
+ } else {
+ val = I915_READ(VLV_PSRCTL(pipe));
+
+		/* Here we transition directly from PSR_state 3 to
+		 * PSR_state 5, i.e. PSR exit, since PSR_state 4 (active
+		 * with single frame update) can be skipped. From
+		 * PSR_state 5 the hardware is responsible for
+		 * transitioning back to PSR_state 1, i.e. PSR inactive -
+		 * the same state as after vlv_psr_enable_source.
+		 */
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ I915_WRITE(VLV_PSRCTL(pipe), val);
+
+		/* Send AUX wake up - the spec says that after
+		 * transitioning to PSR active we have to send an AUX
+		 * wake up by writing 01h to DPCD 600h of the sink device.
+		 * XXX: This might slow down the transition, but without it
+		 * the HW doesn't complete the transition to PSR_state 1
+		 * and we never get the screen updated.
+		 */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
}
+ dev_priv->psr.active = false;
}
/**
@@ -459,6 +636,15 @@ void intel_psr_flush(struct drm_device *dev,
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
+	/*
+	 * On Valleyview and Cherryview we don't use hardware tracking, so
+	 * plane updates and cursor moves don't result in a PSR
+	 * invalidate, which means we need to manually fake this in
+	 * software for all flushes, not just when we've seen a preceding
+	 * invalidation through frontbuffer rendering. */
+ if (!HAS_DDI(dev))
+ intel_psr_exit(dev);
+
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 56c1429d8a60..11c8e7b3dd7c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 419e35a7b0ff..655180646152 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 78011d73fa9f..95288a34c15d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
index 875075373807..16a7ec273bd9 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen9_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c7bc93d28d84..e5b3c6dbd467 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,16 +52,27 @@ intel_ring_initialized(struct intel_engine_cs *ring)
int __intel_ring_space(int head, int tail, int size)
{
- int space = head - (tail + I915_RING_FREE_SPACE);
- if (space < 0)
+ int space = head - tail;
+ if (space <= 0)
space += size;
- return space;
+ return space - I915_RING_FREE_SPACE;
+}
+
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+{
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ }
+
+ ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+ ringbuf->tail, ringbuf->size);
}
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
- return __intel_ring_space(ringbuf->head & HEAD_ADDR,
- ringbuf->tail, ringbuf->size);
+ intel_ring_update_space(ringbuf);
+ return ringbuf->space;
}
bool intel_ring_stopped(struct intel_engine_cs *ring)
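The rewritten __intel_ring_space() above moves the reserved headroom out of the wrap test: it computes the raw head-to-tail distance, wraps when the distance is non-positive, and only then subtracts I915_RING_FREE_SPACE. A minimal standalone sketch of the same circular-buffer arithmetic (the 32-byte headroom value is assumed here for illustration; the driver defines its own I915_RING_FREE_SPACE):

    #include <assert.h>
    #include <stdio.h>

    #define RING_FREE_SPACE 32   /* assumed headroom, mirrors I915_RING_FREE_SPACE */

    /* Free bytes in a circular buffer of `size`, keeping headroom so the
     * tail never catches up with the head exactly. */
    int ring_space(int head, int tail, int size)
    {
        int space = head - tail;
        if (space <= 0)
            space += size;       /* tail is ahead of head: wrap around */
        return space - RING_FREE_SPACE;
    }

    int main(void)
    {
        /* empty ring: head == tail, everything but the headroom is free */
        assert(ring_space(0, 0, 4096) == 4096 - RING_FREE_SPACE);
        /* tail near the end, head already wrapped to a low offset */
        assert(ring_space(128, 4000, 4096) == 224 - RING_FREE_SPACE);
        printf("ok\n");
        return 0;
    }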
@@ -528,7 +539,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (!stop_ring(ring)) {
/* G45 ring initialization often fails to reset head to zero */
@@ -592,15 +603,15 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
+ ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -627,8 +638,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
- if (ring->scratch.obj)
- return 0;
+ WARN_ON(ring->scratch.obj);
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
@@ -672,7 +682,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@@ -703,6 +713,22 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
+static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_ring_workarounds_emit(ring, ctx);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+
+ return ret;
+}
+
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
@@ -762,11 +788,24 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
+ /* WaForceEnableNonCoherent:bdw */
+ /* WaHdcDisableFetchWhenMasked:bdw */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for Broadwell; turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
@@ -807,6 +846,30 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
+ /* According to the CACHE_MODE_0 default value documentation, some
+ * CHV platforms disable this optimization by default. Turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+ /*
+	 * BSpec recommends 8x4 when MSAA is used; however,
+	 * in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
return 0;
}
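The WA_SET_BIT_MASKED / WA_CLR_BIT_MASKED / WA_SET_FIELD_MASKED helpers used above target masked registers, where bits 31:16 of the written value select which of bits 15:0 the hardware latches, so a single write can update one field without a read-modify-write. A rough sketch of that encoding, assuming the usual upper-half-mask layout (names and field positions here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Masked-register write: bits 31:16 pick which of bits 15:0 the
     * hardware will latch; unselected bits keep their old value. */
    uint32_t masked_field(uint16_t mask, uint16_t value)
    {
        return ((uint32_t)mask << 16) | value;
    }

    uint32_t masked_bit_enable(uint16_t bit)  { return masked_field(bit, bit); }
    uint32_t masked_bit_disable(uint16_t bit) { return masked_field(bit, 0); }

    int main(void)
    {
        uint16_t HASHING_MASK = 0x3 << 2;   /* illustrative field position */
        uint16_t HASHING_16x4 = 0x1 << 2;

        printf("set field: 0x%08x\n", (unsigned)masked_field(HASHING_MASK, HASHING_16x4));
        printf("set bit:   0x%08x\n", (unsigned)masked_bit_enable(1 << 5));
        printf("clear bit: 0x%08x\n", (unsigned)masked_bit_disable(1 << 5));
        return 0;
    }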
@@ -861,12 +924,6 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
- }
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -918,17 +975,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
@@ -956,16 +1016,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
@@ -994,9 +1057,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
+ u32 seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
}
}
@@ -1031,7 +1096,8 @@ gen6_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1149,7 +1215,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1168,7 +1235,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1408,7 +1476,8 @@ i9xx_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1789,15 +1858,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf;
int ret;
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
+ WARN_ON(ring->buffer);
+
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
@@ -1820,21 +1889,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- if (ringbuf->obj == NULL) {
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
- ring->name, ret);
- goto error;
- }
+ WARN_ON(ringbuf->obj);
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
- intel_destroy_ringbuffer_obj(ringbuf);
- goto error;
- }
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1849,10 +1918,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
- ret = ring->init(ring);
- if (ret)
- goto error;
-
return 0;
error:
@@ -1877,8 +1942,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1895,38 +1959,27 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= n)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
- if (__intel_ring_space(request->tail, ringbuf->tail,
+ if (__intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size) >= n) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
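intel_ring_wait_request() now drops the seqno bookkeeping and instead walks the ring's request list for the oldest request whose retirement would free at least n bytes, then waits on that request object directly. A simplified model of the search, with invented types standing in for drm_i915_gem_request:

    #include <stddef.h>

    struct request {
        struct request *next;
        int postfix;             /* ring offset just past this request */
    };

    /* Space that would be free if everything up to `head` were retired. */
    int space_after(int head, int tail, int size)
    {
        int space = head - tail;
        if (space <= 0)
            space += size;
        return space;
    }

    /* Oldest request whose retirement frees >= n bytes, or NULL if none. */
    struct request *find_request_to_wait_on(struct request *list,
                                            int tail, int size, int n)
    {
        for (struct request *req = list; req; req = req->next)
            if (space_after(req->postfix, tail, size) >= n)
                return req;
        return NULL;
    }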
@@ -1952,14 +2005,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
trace_i915_ring_wait_begin(ring);
do {
+ if (intel_ring_space(ringbuf) >= n)
+ break;
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= n)
break;
- }
msleep(1);
@@ -2000,19 +2053,19 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_seqno) {
- ret = i915_add_request(ring, NULL);
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring);
if (ret)
return ret;
}
@@ -2021,30 +2074,39 @@ int intel_ring_idle(struct intel_engine_cs *ring)
if (list_empty(&ring->request_list))
return 0;
- seqno = list_entry(ring->request_list.prev,
+ req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list)->seqno;
+ list);
- return i915_wait_seqno(ring, seqno);
+ return i915_wait_request(req);
}
static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
{
- if (ring->outstanding_lazy_seqno)
+ int ret;
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
+
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ ring->outstanding_lazy_request = request;
+ return 0;
}
static int __intel_ring_prepare(struct intel_engine_cs *ring,
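intel_ring_alloc_request() above replaces the old seqno-plus-preallocated-request pair with a single refcounted request allocated lazily on first use. A reduced sketch of that pattern, with plain C standing in for kref and the driver's request struct:

    #include <stdlib.h>

    struct lazy_request {
        int refcount;
        unsigned seqno;
        unsigned uniq;
    };

    struct engine {
        struct lazy_request *outstanding;   /* NULL until first use */
        unsigned next_seqno, next_uniq;
    };

    /* Allocate the pending request on first use; later calls are no-ops
     * until the request is emitted and the pointer cleared. */
    int engine_alloc_request(struct engine *e)
    {
        struct lazy_request *req;

        if (e->outstanding)
            return 0;

        req = calloc(1, sizeof(*req));
        if (!req)
            return -1;            /* stands in for -ENOMEM */

        req->refcount = 1;        /* kref_init() in the driver */
        req->seqno = e->next_seqno++;
        req->uniq = e->next_uniq++;
        e->outstanding = req;
        return 0;
    }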
@@ -2084,7 +2146,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
return ret;
/* Preallocate the olr before touching the ring */
- ret = intel_ring_alloc_seqno(ring);
+ ret = intel_ring_alloc_request(ring);
if (ret)
return ret;
@@ -2119,7 +2181,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_seqno);
+ BUG_ON(ring->outstanding_lazy_request);
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -2178,6 +2240,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
+
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -2185,8 +2255,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
- MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
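The comment above is the key ordering argument: the post-sync store is emitted unconditionally so that later commands are ordered against the flushed write cache, while TLB invalidation is OR'd in only on demand. A small sketch of the flag composition; the bit positions below are placeholders, not the real MI_FLUSH_DW encoding from i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit positions for illustration only. */
    #define MI_FLUSH_DW_CMD        (1u << 26)
    #define FLUSH_STORE_INDEX      (1u << 21)
    #define FLUSH_INVALIDATE_TLB   (1u << 18)
    #define FLUSH_OP_STOREDW       (1u << 14)

    uint32_t build_flush_cmd(int invalidate_gpu_domains)
    {
        /* The post-sync store is unconditional: it is the ordering barrier. */
        uint32_t cmd = MI_FLUSH_DW_CMD | FLUSH_STORE_INDEX | FLUSH_OP_STOREDW;

        if (invalidate_gpu_domains)
            cmd |= FLUSH_INVALIDATE_TLB;
        return cmd;
    }

    int main(void)
    {
        printf("flush:       0x%08x\n", (unsigned)build_flush_cmd(0));
        printf("flush+inval: 0x%08x\n", (unsigned)build_flush_cmd(1));
        return 0;
    }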
@@ -2282,6 +2352,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
+
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
@@ -2289,8 +2367,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
@@ -2341,7 +2418,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
- ring->init_context = intel_ring_workarounds_emit;
+ ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2426,7 +2503,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
+ ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
@@ -2448,7 +2525,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- return intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, ring);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = intel_init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
@@ -2519,7 +2606,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2558,7 +2645,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2615,7 +2702,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2666,7 +2753,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe426cff598b..714f3fdd57d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -99,13 +99,6 @@ struct intel_ringbuffer {
struct intel_engine_cs *ring;
- /*
- * FIXME: This backpointer is an artifact of the history of how the
- * execlist patches came into being. It will get removed once the basic
- * code has landed.
- */
- struct intel_context *FIXME_lrc_ctx;
-
u32 head;
u32 tail;
int space;
@@ -123,6 +116,8 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
+struct intel_context;
+
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
@@ -142,11 +137,11 @@ struct intel_engine_cs {
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- u32 trace_irq_seqno;
+ struct drm_i915_gem_request *trace_irq_req;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
- int (*init)(struct intel_engine_cs *ring);
+ int (*init_hw)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
@@ -239,11 +234,14 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf);
+ int (*emit_request)(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags);
/**
@@ -251,7 +249,7 @@ struct intel_engine_cs {
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -267,8 +265,7 @@ struct intel_engine_cs {
/**
* Do we have some not yet emitted requests outstanding?
*/
- struct drm_i915_gem_request *preallocated_lazy_request;
- u32 outstanding_lazy_seqno;
+ struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
@@ -408,6 +405,7 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
@@ -436,16 +434,11 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
-{
- BUG_ON(ring->outstanding_lazy_seqno == 0);
- return ring->outstanding_lazy_seqno;
-}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
+static inline struct drm_i915_gem_request *
+intel_ring_get_request(struct intel_engine_cs *ring)
{
- if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
- ring->trace_irq_seqno = seqno;
+ BUG_ON(ring->outstanding_lazy_request == NULL);
+ return ring->outstanding_lazy_request;
}
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ac6da7102fbb..49695d7d51e3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -31,7 +31,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#include <drm/i915_powerwell.h>
/**
* DOC: runtime pm
@@ -50,8 +49,6 @@
* present for a given platform.
*/
-static struct i915_power_domains *hsw_pwr;
-
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
@@ -118,7 +115,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
/**
- * intel_display_power_is_enabled - unlocked check for a power domain
+ * intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
@@ -706,6 +703,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -727,24 +728,30 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_PIPE_A_POWER_DOMAINS ( \
@@ -764,20 +771,25 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
@@ -1071,10 +1083,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
- hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
- hsw_pwr = power_domains;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1118,8 +1128,6 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
-
- hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
@@ -1328,52 +1336,3 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(device);
}
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
-
- return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6d7a277458b5..64ad2b40179f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -1007,7 +1008,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
if (intel_sdvo->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -1085,7 +1086,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1112,11 +1113,11 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
}
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->base.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1181,8 +1182,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &crtc->config.requested_mode;
+ &crtc->config->base.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1224,7 +1225,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (crtc->config.has_hdmi_sink) {
+ if (crtc->config->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
@@ -1244,7 +1245,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (crtc->config.pixel_multiplier) {
+ switch (crtc->config->pixel_multiplier) {
default:
WARN(1, "unknown pixel mutlipler specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1259,7 +1260,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1289,7 +1290,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config->pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
@@ -1338,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1370,7 +1371,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1392,7 +1393,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1617,6 +1618,9 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct drm_device *dev = intel_sdvo->base.base.dev;
uint16_t hotplug;
+ if (!I915_HAS_HOTPLUG(dev))
+ return 0;
+
/* HW Erratum: SDVO Hotplug is broken on all i945G chips; there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
@@ -2187,7 +2191,9 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 01d841ea3140..693ce8281970 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -75,26 +75,26 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return 0;
}
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
return val;
@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7d9c340f7693..0a52c44ad03d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
+#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -79,7 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
@@ -255,7 +256,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
default:
BUG();
}
- if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
plane_ctl |= PLANE_CTL_ENABLE;
@@ -412,8 +413,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
sprctl = I915_READ(SPCNTR(pipe, plane));
@@ -494,7 +493,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -502,8 +501,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
@@ -525,9 +522,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprsurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -539,10 +533,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -553,9 +543,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
@@ -626,8 +613,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
sprctl = I915_READ(SPRCTL(pipe));
@@ -699,7 +684,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
@@ -711,8 +696,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -735,9 +718,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -748,10 +728,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -764,16 +740,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
- intel_wait_for_vblank(dev, pipe);
-
- intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
static int
@@ -846,8 +818,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- u32 start_vbl_count;
- bool atomic_update;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -914,7 +884,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -922,8 +892,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
intel_update_primary_plane(intel_crtc);
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -941,9 +909,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -954,10 +919,6 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- u32 start_vbl_count;
- bool atomic_update;
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -969,19 +930,25 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
-
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
- intel_wait_for_vblank(dev, pipe);
-
- intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
-static void
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS. Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1004,11 +971,21 @@ intel_post_enable_primary(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
-static void
+/**
+ * intel_pre_disable_primary - Perform operations before disabling primary plane
+ * @crtc: the CRTC whose primary plane is to be disabled
+ *
+ * Performs potentially sleeping operations that must be done before the
+ * primary plane is disabled, such as updating FBC and IPS.  Note that this may
+ * be called due to an explicit primary plane update, or due to an implicit
+ * disable that is caused when a sprite plane completely hides the primary
+ * plane.
+ */
+void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1017,7 +994,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -1096,20 +1073,26 @@ static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *src = &state->src;
struct drm_rect *dst = &state->dst;
- struct drm_rect *orig_src = &state->orig_src;
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int pixel_size;
+
+ intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
+
+ if (!fb) {
+ state->visible = false;
+ goto finish;
+ }
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -1142,7 +1125,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
@@ -1183,13 +1166,13 @@ intel_check_sprite_plane(struct drm_plane *plane,
drm_rect_height(dst) * vscale - drm_rect_height(src));
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src->x1 < (int) orig_src->x1 ||
- src->y1 < (int) orig_src->y1 ||
- src->x2 > (int) orig_src->x2 ||
- src->y2 > (int) orig_src->y2);
+ WARN_ON(src->x1 < (int) state->base.src_x ||
+ src->y1 < (int) state->base.src_y ||
+ src->x2 > (int) state->base.src_x + state->base.src_w ||
+ src->y2 > (int) state->base.src_y + state->base.src_h);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -1232,6 +1215,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (src_w < 3 || src_h < 3)
state->visible = false;
+ pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
@@ -1254,39 +1238,27 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- return 0;
-}
+finish:
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ state->hides_primary = fb != NULL && drm_rect_equals(dst, clip) &&
+ !colorkey_enabled(intel_plane);
+ WARN_ON(state->hides_primary && !state->visible && intel_crtc->active);
-static int
-intel_prepare_sprite_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
+ if (intel_crtc->active) {
+ if (intel_crtc->primary_enabled == state->hides_primary)
+ intel_crtc->atomic.wait_for_flips = true;
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->primary_enabled && state->hides_primary)
+ intel_crtc->atomic.pre_disable_primary = true;
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE
- * padding.
- */
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
+
+ if (!intel_crtc->primary_enabled && !state->hides_primary)
+ intel_crtc->atomic.post_enable_primary = true;
}
return 0;
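The hides_primary computation above fires only when a framebuffer is attached, the sprite's destination rectangle exactly covers the pipe's clip rectangle, and color keying is off (a keyed sprite lets primary pixels show through). A reduced model of that predicate, with a minimal rect type in place of drm_rect:

    #include <stdbool.h>

    struct rect { int x1, y1, x2, y2; };

    static bool rect_equals(const struct rect *a, const struct rect *b)
    {
        return a->x1 == b->x1 && a->y1 == b->y1 &&
               a->x2 == b->x2 && a->y2 == b->y2;
    }

    /* Sprite fully obscures the primary plane iff it has a framebuffer,
     * exactly covers the pipe, and isn't punching holes via color keying. */
    bool sprite_hides_primary(bool has_fb, const struct rect *dst,
                              const struct rect *clip, bool colorkey_enabled)
    {
        return has_fb && rect_equals(dst, clip) && !colorkey_enabled;
    }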
@@ -1296,48 +1268,23 @@ static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
- struct drm_rect *dst = &state->dst;
- const struct drm_rect *clip = &state->clip;
- bool primary_enabled;
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
- WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
-
- intel_plane->crtc_x = state->orig_dst.x1;
- intel_plane->crtc_y = state->orig_dst.y1;
- intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
- intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
- intel_plane->src_x = state->orig_src.x1;
- intel_plane->src_y = state->orig_src.y1;
- intel_plane->src_w = drm_rect_width(&state->orig_src);
- intel_plane->src_h = drm_rect_height(&state->orig_src);
+ crtc = crtc ? crtc : plane->crtc;
+ intel_crtc = to_intel_crtc(crtc);
+
+ plane->fb = state->base.fb;
intel_plane->obj = obj;
if (intel_crtc->active) {
- bool primary_was_enabled = intel_crtc->primary_enabled;
-
- intel_crtc->primary_enabled = primary_enabled;
-
- if (primary_was_enabled != primary_enabled)
- intel_crtc_wait_for_pending_flips(crtc);
-
- if (primary_was_enabled && !primary_enabled)
- intel_pre_disable_primary(crtc);
+ intel_crtc->primary_enabled = !state->hides_primary;
if (state->visible) {
crtc_x = state->dst.x1;
@@ -1354,129 +1301,9 @@ intel_commit_sprite_plane(struct drm_plane *plane,
} else {
intel_plane->disable_plane(plane, crtc);
}
-
-
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
-
- if (!primary_was_enabled && primary_enabled)
- intel_post_enable_primary(crtc);
- }
-
- /* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj && old_obj != obj) {
-
- /*
- * It's fairly common to simply update the position of
- * an existing object. In that case, we don't need to
- * wait for vblank to avoid ugliness, we only need to
- * do the pin & ref bookkeeping.
- */
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
}
}
-static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_plane_state state;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int ret;
-
- state.crtc = crtc;
- state.fb = fb;
-
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
-
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
- state.orig_src = state.src;
- state.orig_dst = state.dst;
-
- ret = intel_check_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- ret = intel_prepare_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- intel_commit_sprite_plane(plane, &state);
- return 0;
-}
-
-static int
-intel_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc;
- enum pipe pipe;
-
- if (!plane->fb)
- return 0;
-
- if (WARN_ON(!plane->crtc))
- return -EINVAL;
-
- intel_crtc = to_intel_crtc(plane->crtc);
- pipe = intel_crtc->pipe;
-
- if (intel_crtc->active) {
- bool primary_was_enabled = intel_crtc->primary_enabled;
-
- intel_crtc->primary_enabled = true;
-
- intel_plane->disable_plane(plane, plane->crtc);
-
- if (!primary_was_enabled && intel_crtc->primary_enabled)
- intel_post_enable_primary(plane->crtc);
- }
-
- if (intel_plane->obj) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_plane->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- i915_gem_track_fb(intel_plane->obj, NULL,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- intel_plane->obj = NULL;
- }
-
- return 0;
-}
-
-static void intel_destroy_plane(struct drm_plane *plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(plane);
- intel_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(intel_plane);
-}
-
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1535,62 +1362,18 @@ out_unlock:
return ret;
}
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint64_t old_val;
- int ret = -ENOENT;
-
- if (prop == dev->mode_config.rotation_property) {
- /* exactly one rotation angle please */
- if (hweight32(val & 0xf) != 1)
- return -EINVAL;
-
- if (intel_plane->rotation == val)
- return 0;
-
- old_val = intel_plane->rotation;
- intel_plane->rotation = val;
- ret = intel_plane_restore(plane);
- if (ret)
- intel_plane->rotation = old_val;
- }
-
- return ret;
-}
-
int intel_plane_restore(struct drm_plane *plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane);
-
if (!plane->crtc || !plane->fb)
return 0;
return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
- intel_plane->crtc_x, intel_plane->crtc_y,
- intel_plane->crtc_w, intel_plane->crtc_h,
- intel_plane->src_x, intel_plane->src_y,
- intel_plane->src_w, intel_plane->src_h);
-}
-
-void intel_plane_disable(struct drm_plane *plane)
-{
- if (!plane->crtc || !plane->fb)
- return;
-
- intel_disable_plane(plane);
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->src_x, plane->state->src_y,
+ plane->state->src_w, plane->state->src_h);
}
-static const struct drm_plane_funcs intel_plane_funcs = {
- .update_plane = intel_update_plane,
- .disable_plane = intel_disable_plane,
- .destroy = intel_destroy_plane,
- .set_property = intel_plane_set_property,
-};
-
static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1638,6 +1421,7 @@ int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
struct intel_plane *intel_plane;
+ struct intel_plane_state *state;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
@@ -1650,6 +1434,13 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (!intel_plane)
return -ENOMEM;
+ state = intel_create_plane_state(&intel_plane->base);
+ if (!state) {
+ kfree(intel_plane);
+ return -ENOMEM;
+ }
+ intel_plane->base.state = &state->base;
+
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
@@ -1719,7 +1510,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
- intel_plane->rotation = BIT(DRM_ROTATE_0);
+ intel_plane->check_plane = intel_check_sprite_plane;
+ intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
@@ -1739,7 +1531,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (dev->mode_config.rotation_property)
drm_object_attach_property(&intel_plane->base.base,
dev->mode_config.rotation_property,
- intel_plane->rotation);
+ state->base.rotation);
+
+ drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
out:
return ret;
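
For context, the check_plane/commit_plane hooks wired up above split a sprite update into a validation phase and a hardware-write phase, driven by the transitional plane helpers. A minimal sketch of the resulting flow, assuming the helper layer calls back into the hooks roughly like this (sketch_update_plane is illustrative, not a function from this patch):

static int sketch_update_plane(struct drm_plane *plane,
			       struct intel_plane_state *state)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	int ret;

	/* Phase 1: validate and clip the requested state; no MMIO yet. */
	ret = intel_plane->check_plane(plane, state);
	if (ret)
		return ret;

	/* Phase 2: program the validated state into the hardware. */
	intel_plane->commit_plane(plane, state);
	return 0;
}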
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6f5f59b880f5..892d23c8479d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -31,6 +31,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -908,14 +909,14 @@ intel_tv_mode_valid(struct drm_connector *connector,
static void
intel_tv_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -923,12 +924,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
- pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
+ pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
	/* TV has its own notion of sync and other mode flags, so clear them. */
- pipe_config->adjusted_mode.flags = 0;
+ pipe_config->base.adjusted_mode.flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
@@ -1512,7 +1513,9 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
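
The pipe_config->base.* accesses introduced above follow from the intel_crtc_config -> intel_crtc_state rename: the driver-private CRTC state now embeds the core atomic state as its first member. Roughly (a sketch with the field list abridged):

struct intel_crtc_state {
	struct drm_crtc_state base;	/* core atomic state: adjusted_mode, flags, ... */

	/* driver-private members, e.g.: */
	int pipe_bpp;
	int port_clock;
	/* ... */
};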
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 46de8d75b4bf..c47a3baa53d5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include <linux/pm_runtime.h>
+
#define FORCEWAKE_ACK_TIMEOUT_MS 2
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
@@ -40,6 +42,26 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+static const char * const forcewake_domain_names[] = {
+ "render",
+ "blitter",
+ "media",
+};
+
+const char *
+intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
+{
+ BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
+ FW_DOMAIN_ID_COUNT);
+
+ if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
+ return forcewake_domain_names[id];
+
+ WARN_ON(id);
+
+ return "unknown";
+}
+
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
@@ -47,73 +69,128 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
"Device suspended\n");
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
+ WARN_ON(d->reg_set == 0);
+ __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ mod_timer_pinned(&d->timer, jiffies + 1);
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static inline void
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- __raw_i915_write32(dev_priv, FORCEWAKE, 1);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+static inline void
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+static inline void
+fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+{
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+static inline void
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
-static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ /* something from same cacheline, but not from the set register */
+ if (d->reg_post)
+ __raw_posting_read(d->i915, d->reg_post);
}
-static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void
+fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
- u32 forcewake_ack;
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
+ fw_domain_wait_ack(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+static void
+fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_put(d);
+ fw_domain_posting_read(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+static void
+fw_domains_posting_read(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- /* WaRsForcewakeWaitTC0:ivb,hsw */
+	/* No need to do this for all domains; the first one found is enough */
+ for_each_fw_domain(d, dev_priv, id) {
+ fw_domain_posting_read(d);
+ break;
+ }
+}
+
+static void
+fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
+
+ if (dev_priv->uncore.fw_domains == 0)
+ return;
+
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+ fw_domain_reset(d);
+
+ fw_domains_posting_read(dev_priv);
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ fw_domains_get(dev_priv, fw_domains);
+
+ /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -126,27 +203,13 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ fw_domains_put(dev_priv, fw_domains);
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
-
- if (IS_GEN7(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -174,332 +237,78 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
-}
-
-static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for media to ack.\n");
- }
-}
-
-static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
-
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
- if (!IS_CHERRYVIEW(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+static void intel_uncore_fw_release_timer(unsigned long arg)
{
+ struct intel_uncore_forcewake_domain *domain = (void *)arg;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (fw_engine & FORCEWAKE_RENDER &&
- dev_priv->uncore.fw_rendercount++ != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- if (fw_engine & FORCEWAKE_MEDIA &&
- dev_priv->uncore.fw_mediacount++ != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
-
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (fw_engine & FORCEWAKE_RENDER) {
- WARN_ON(!dev_priv->uncore.fw_rendercount);
- if (--dev_priv->uncore.fw_rendercount != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- }
-
- if (fw_engine & FORCEWAKE_MEDIA) {
- WARN_ON(!dev_priv->uncore.fw_mediacount);
- if (--dev_priv->uncore.fw_mediacount != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
- }
-
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-}
-
-static void
-__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
+ assert_device_not_suspended(domain->i915);
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Media to ack.\n");
- }
+ spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ if (WARN_ON(domain->wake_count == 0))
+ domain->wake_count++;
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
- }
-}
+ if (--domain->wake_count == 0)
+ domain->i915->uncore.funcs.force_wake_put(domain->i915,
+ 1 << domain->id);
-static void
-__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
-static void
-gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ int retry_count = 100;
+ enum forcewake_domain_id id;
+ enum forcewake_domains fw = 0, active_domains;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (FORCEWAKE_RENDER & fw_engine) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- }
-
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly. Wait until all pending
+	 * timers have run before taking the lock.
+ */
+ while (1) {
+ active_domains = 0;
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (del_timer_sync(&domain->timer) == 0)
+ continue;
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
+ intel_uncore_fw_release_timer((unsigned long)domain);
+ }
-static void
-gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (timer_pending(&domain->timer))
+ active_domains |= (1 << id);
+ }
- if (FORCEWAKE_RENDER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_rendercount == 0);
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- }
+ if (active_domains == 0)
+ break;
- if (FORCEWAKE_MEDIA & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_mediacount == 0);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ if (--retry_count == 0) {
+ DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ break;
+ }
- if (FORCEWAKE_BLITTER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_blittercount == 0);
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ cond_resched();
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void gen6_force_wake_timer(unsigned long arg)
-{
- struct drm_i915_private *dev_priv = (void *)arg;
- unsigned long irqflags;
-
- assert_device_not_suspended(dev_priv);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
-
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
- if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
- gen6_force_wake_timer((unsigned long)dev_priv);
-
- /* Hold uncore.lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(active_domains);
- if (IS_VALLEYVIEW(dev))
- vlv_force_wake_reset(dev_priv);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
- __gen6_gt_force_wake_reset(dev_priv);
+ for_each_fw_domain(domain, dev_priv, id)
+ if (domain->wake_count)
+ fw |= 1 << id;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- __gen7_gt_force_wake_mt_reset(dev_priv);
+ if (fw)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- if (IS_GEN9(dev))
- __gen9_gt_force_wake_mt_reset(dev_priv);
+ fw_domains_reset(dev_priv, FORCEWAKE_ALL);
if (restore) { /* If reset with a user forcewake, try to restore */
- unsigned fw = 0;
-
- if (IS_VALLEYVIEW(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
- } else if (IS_GEN9(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
-
- if (dev_priv->uncore.fw_blittercount)
- fw |= FORCEWAKE_BLITTER;
- } else {
- if (dev_priv->uncore.forcewake_count)
- fw = FORCEWAKE_ALL;
- }
-
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
@@ -509,17 +318,16 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
GT_FIFO_FREE_ENTRIES_MASK;
}
+ if (!restore)
+ assert_forcewakes_inactive(dev_priv);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
- bool restore_forcewake)
+static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
@@ -530,6 +338,15 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
+}
+
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -551,81 +368,92 @@ void intel_uncore_sanitize(struct drm_device *dev)
intel_disable_gt_powersave(dev);
}
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
+/**
+ * intel_uncore_forcewake_get - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get references on
+ *
+ * This function can be used to get GT's forcewake domain references.
+ * Normal register access will handle the forcewake domains automatically.
+ * However, if some sequence requires the GT to not power down particular
+ * forcewake domains, this function should be called at the beginning of the
+ * sequence, and the references should subsequently be dropped by a
+ * symmetric call to intel_uncore_forcewake_put(). Callers usually want all
+ * domains kept awake, in which case @fw_domains should be FORCEWAKE_ALL.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- intel_runtime_pm_get(dev_priv);
+ WARN_ON(dev_priv->pm.suspended);
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev))
- return gen9_force_wake_get(dev_priv, fw_engine);
-
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev))
- return vlv_force_wake_get(dev_priv, fw_engine);
+ fw_domains &= dev_priv->uncore.fw_domains;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count++)
+ fw_domains &= ~(1 << id);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-/*
- * see gen6_gt_force_wake_get()
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to drop references on
+ *
+ * This function drops the device-level forcewake references for the
+ * specified domains obtained by intel_uncore_forcewake_get().
*/
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- bool delayed = false;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev)) {
- gen9_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ fw_domains &= dev_priv->uncore.fw_domains;
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev)) {
- vlv_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (WARN_ON(domain->wake_count == 0))
+ continue;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
+ if (--domain->wake_count)
+ continue;
- if (--dev_priv->uncore.forcewake_count == 0) {
- dev_priv->uncore.forcewake_count++;
- delayed = true;
- mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
- jiffies + 1);
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-out:
- if (!delayed)
- intel_runtime_pm_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
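
To illustrate the get/put pairing documented above (a sketch only; the surrounding function and register offsets are hypothetical, and I915_READ assumes a dev_priv in scope as usual):

static u64 sketch_read_counter(struct drm_i915_private *dev_priv)
{
	u32 lo, hi;

	/* Keep every domain awake across the two dependent reads. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	lo = I915_READ(0x138128);	/* hypothetical offsets */
	hi = I915_READ(0x13812c);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ((u64)hi << 32) | lo;
}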
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- WARN_ON(dev_priv->uncore.forcewake_count > 0);
+ for_each_fw_domain(domain, dev_priv, id)
+ WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
@@ -647,9 +475,9 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
@@ -658,17 +486,14 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ REG_RANGE((reg), 0x30000, 0x38000))
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xC000, 0xC800) || \
- REG_RANGE((reg), 0xF000, 0x10000) || \
- REG_RANGE((reg), 0x14000, 0x14400) || \
- REG_RANGE((reg), 0x22000, 0x24000))
+ REG_RANGE((reg), 0xF000, 0x10000))
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@@ -740,96 +565,118 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
}
}
-#define REG_READ_HEADER(x) \
- unsigned long irqflags; \
+#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ assert_device_not_suspended(dev_priv);
-#define REG_READ_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-#define __gen4_read(x) \
+#define __gen2_read(x) \
static u##x \
-gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN2_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN2_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
+}
+
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen2_read(8)
+__gen2_read(16)
+__gen2_read(32)
+__gen2_read(64)
+
+#undef __gen5_read
+#undef __gen2_read
+
+#undef GEN2_READ_FOOTER
+#undef GEN2_READ_HEADER
+
+#define GEN6_READ_HEADER(x) \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+static inline void __force_wake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
+ if (WARN_ON(!fw_domains))
+ return;
+
+	/* Ideally GCC would constant-fold and eliminate this loop */
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count) {
+ fw_domains &= ~(1 << id);
+ continue;
+ }
+
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
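
This fast path is what makes implicit forcewake cheap: the first access to a sleeping domain takes a reference and arms the one-jiffy pinned timer, any further accesses inside that window see wake_count != 0 and skip the handshake entirely, and intel_uncore_fw_release_timer() eventually drops the reference and powers the domain back down. A condensed restatement of the acquire side (a sketch for one domain, not code from this patch; uncore.lock is assumed held as in the callers above):

static void sketch_implicit_wake(struct drm_i915_private *dev_priv,
				 struct intel_uncore_forcewake_domain *d)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (d->wake_count == 0) {
		/* First user: take the reference that the release timer
		 * will drop again, arm the timer, wake the hardware. */
		d->wake_count++;
		fw_domain_arm_timer(d);
		dev_priv->uncore.funcs.force_wake_get(dev_priv, 1 << d->id);
	}
	/* Otherwise the domain is already awake (possibly held only by
	 * the pending release timer); the raw access can proceed. */
}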
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
- if (dev_priv->uncore.forcewake_count == 0 && \
- NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- val = __raw_i915_read##x(dev_priv, reg); \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, \
+ FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
@@ -838,33 +685,22 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- } \
- REG_READ_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
}
__gen9_read(8)
@@ -883,55 +719,66 @@ __gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-__gen5_read(8)
-__gen5_read(16)
-__gen5_read(32)
-__gen5_read(64)
-__gen4_read(8)
-__gen4_read(16)
-__gen4_read(32)
-__gen4_read(64)
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
-#undef __gen5_read
-#undef __gen4_read
-#undef REG_READ_FOOTER
-#undef REG_READ_HEADER
+#undef GEN6_READ_FOOTER
+#undef GEN6_READ_HEADER
-#define REG_WRITE_HEADER \
- unsigned long irqflags; \
+#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-#define REG_WRITE_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+#define GEN2_WRITE_FOOTER
-#define __gen4_write(x) \
+#define __gen2_write(x) \
static void \
-gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ GEN2_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN2_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen2_write(8)
+__gen2_write(16)
+__gen2_write(32)
+__gen2_write(64)
+
+#undef __gen5_write
+#undef __gen2_write
+
+#undef GEN2_WRITE_FOOTER
+#undef GEN2_WRITE_HEADER
+
+#define GEN6_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -939,14 +786,14 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -957,7 +804,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
@@ -984,50 +831,31 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
- if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ __raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- unsigned fwengine = 0; \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (!shadowed) { \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
} \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen9_shadowed_regs[] = {
@@ -1057,36 +885,23 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
- REG_WRITE_HEADER; \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
- is_gen9_shadowed(dev_priv, reg)) { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- fwengine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- fwengine); \
- } \
- REG_WRITE_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
}
__gen9_write(8)
@@ -1109,24 +924,14 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
-__gen5_write(8)
-__gen5_write(16)
-__gen5_write(32)
-__gen5_write(64)
-__gen4_write(8)
-__gen4_write(16)
-__gen4_write(32)
-__gen4_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
-#undef __gen5_write
-#undef __gen4_write
-#undef REG_WRITE_FOOTER
-#undef REG_WRITE_HEADER
+#undef GEN6_WRITE_FOOTER
+#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
@@ -1144,24 +949,86 @@ do { \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
-void intel_uncore_init(struct drm_device *dev)
+
+static void fw_domain_init(struct drm_i915_private *dev_priv,
+ enum forcewake_domain_id domain_id,
+ u32 reg_set, u32 reg_ack)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *d;
- setup_timer(&dev_priv->uncore.force_wake_timer,
- gen6_force_wake_timer, (unsigned long)dev_priv);
+ if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+ return;
- __intel_uncore_early_sanitize(dev, false);
+ d = &dev_priv->uncore.fw_domain[domain_id];
+
+ WARN_ON(d->wake_count);
+
+ d->wake_count = 0;
+ d->reg_set = reg_set;
+ d->reg_ack = reg_ack;
+
+ if (IS_GEN6(dev_priv)) {
+ d->val_reset = 0;
+ d->val_set = FORCEWAKE_KERNEL;
+ d->val_clear = 0;
+ } else {
+ d->val_reset = _MASKED_BIT_DISABLE(0xffff);
+ d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv))
+ d->reg_post = FORCEWAKE_ACK_VLV;
+ else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
+ d->reg_post = ECOBUS;
+ else
+ d->reg_post = 0;
+
+ d->i915 = dev_priv;
+ d->id = domain_id;
+
+ setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+
+ dev_priv->uncore.fw_domains |= (1 << domain_id);
+
+ fw_domain_reset(d);
+}
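
The non-gen6 values above rely on the hardware's masked-bit write convention, where the upper 16 bits of the register select which of the lower bits the write is allowed to change. The helpers are defined (in i915_reg.h) along the lines of:

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

So _MASKED_BIT_DISABLE(0xffff) unmasks all sixteen control bits and clears them at once, which is why it doubles as the reset value.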
+
+static void intel_uncore_fw_domains_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+ return;
if (IS_GEN9(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_ACK_RENDER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_ACK_BLITTER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ if (!IS_CHERRYVIEW(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -1174,35 +1041,54 @@ void intel_uncore_init(struct drm_device *dev)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+
+ /* We need to init first for ECOBUS access and then
+		 * determine later if we want to reinit, in case MT access is
+		 * not working.
+ */
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+
mutex_lock(&dev->struct_mutex);
- __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen7_gt_force_wake_mt_put;
- } else {
+ if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
+ fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domains_put_with_fifo;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
+ /* All future platforms are expected to require complex power gating */
+ WARN_ON(dev_priv->uncore.fw_domains == 0);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_ellc_detect(dev);
+ intel_uncore_fw_domains_init(dev);
+ __intel_uncore_early_sanitize(dev, false);
+
switch (INTEL_INFO(dev)->gen) {
default:
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
@@ -1239,8 +1125,8 @@ void intel_uncore_init(struct drm_device *dev)
case 4:
case 3:
case 2:
- ASSIGN_WRITE_MMIO_VFUNCS(gen4);
- ASSIGN_READ_MMIO_VFUNCS(gen4);
+ ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+ ASSIGN_READ_MMIO_VFUNCS(gen2);
break;
}
@@ -1300,7 +1186,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
reg->val = I915_READ8(reg->offset);
break;
default:
- WARN_ON(1);
+ MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}