Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 75
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 352
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 242
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 178
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 392
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 545
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 115
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1068
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 160
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 335
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 294
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 199
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 215
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 107
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c | 552
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 264
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1788
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 459
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 184
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 208
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1286
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 351
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 266
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 476
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 69
57 files changed, 6729 insertions, 4430 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f01922591679..a69002e2257d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
@@ -83,9 +84,11 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o
+# virtual gpu code
+i915-y += i915_vgpu.o
+
# legacy horrors
-i915-y += i915_dma.o \
- i915_ums.o
+i915-y += i915_dma.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 806e812340d0..61ae8ff4eaed 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,23 +818,28 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
return false;
}
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+ unsigned start, unsigned len)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
+ int first_page = start >> PAGE_SHIFT;
+ int last_page = (len + start + 4095) >> PAGE_SHIFT;
+ int npages = last_page - first_page;
struct page **pages;
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ pages = drm_malloc_ab(npages, sizeof(*pages));
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- pages[i] = sg_page_iter_page(&sg_iter);
- i++;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
+ pages[i++] = sg_page_iter_page(&sg_iter);
+ if (i == npages)
+ break;
}
addr = vmap(pages, i, 0, PAGE_KERNEL);
@@ -855,61 +860,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
u32 batch_start_offset,
u32 batch_len)
{
- int ret = 0;
int needs_clflush = 0;
- u32 *src_base, *dest_base = NULL;
- u32 *src_addr, *dest_addr;
- u32 offset = batch_start_offset / sizeof(*dest_addr);
- u32 end = batch_start_offset + batch_len;
+ void *src_base, *src;
+ void *dst = NULL;
+ int ret;
- if (end > dest_obj->base.size || end > src_obj->base.size)
+ if (batch_len > dest_obj->base.size ||
+ batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
return ERR_PTR(ret);
}
- src_base = vmap_batch(src_obj);
+ src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
- src_addr = src_base + offset;
-
- if (needs_clflush)
- drm_clflush_virt_range((char *)src_addr, batch_len);
+ ret = i915_gem_object_get_pages(dest_obj);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+ goto unmap_src;
+ }
+ i915_gem_object_pin_pages(dest_obj);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
goto unmap_src;
}
- dest_base = vmap_batch(dest_obj);
- if (!dest_base) {
+ dst = vmap_batch(dest_obj, 0, batch_len);
+ if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ i915_gem_object_unpin_pages(dest_obj);
ret = -ENOMEM;
goto unmap_src;
}
- dest_addr = dest_base + offset;
-
- if (batch_start_offset != 0)
- memset((u8 *)dest_base, 0, batch_start_offset);
+ src = src_base + offset_in_page(batch_start_offset);
+ if (needs_clflush)
+ drm_clflush_virt_range(src, batch_len);
- memcpy(dest_addr, src_addr, batch_len);
- memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+ memcpy(dst, src, batch_len);
unmap_src:
vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
- return ret ? ERR_PTR(ret) : dest_base;
+ return ret ? ERR_PTR(ret) : dst;
}
/**
@@ -1046,34 +1051,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
u32 batch_len,
bool is_master)
{
- int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
- ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
- return -1;
- }
+ int ret = 0;
batch_base = copy_batch(shadow_batch_obj, batch_obj,
batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
return PTR_ERR(batch_base);
}
- cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = batch_base + (batch_len / sizeof(*batch_end));
+ cmd = batch_base;
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
@@ -1132,7 +1129,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ i915_gem_object_unpin_pages(shadow_batch_obj);
return ret;
}
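
The reworked vmap_batch() above maps only the pages backing the requested byte range rather than the whole object: the start offset selects the first page, and the end of the range is rounded up to a page boundary. Below is a minimal, self-contained sketch of that page-range arithmetic; the page_range() helper and the hardcoded 4 KiB page size are illustrative, not part of the driver.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Compute the first backing page and page count for a byte range,
     * mirroring the first_page/last_page/npages math in vmap_batch(). */
    static void page_range(unsigned int start, unsigned int len,
                           unsigned int *first, unsigned int *npages)
    {
        unsigned int first_page = start >> PAGE_SHIFT;
        unsigned int last_page = (start + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        *first = first_page;
        *npages = last_page - first_page;
    }

    int main(void)
    {
        unsigned int first, npages;

        /* A 100-byte batch starting 20 bytes into the second page needs
         * exactly one page mapped: first page 1, npages 1. */
        page_range(PAGE_SIZE + 20, 100, &first, &npages);
        printf("first page %u, %u page(s)\n", first, npages);
        return 0;
    }

For a whole-object map (as copy_batch() still does for the shadow object), start is 0 and the same formula degenerates to DIV_ROUND_UP(len, PAGE_SIZE).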
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e8b18e542da4..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (vma->pin_count > 0)
pin_count++;
- seq_printf(m, " (pinned x %d)", pin_count);
+ }
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_vblank_count(dev, crtc->pipe));
+ drm_crtc_vblank_count(&crtc->base));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
@@ -1089,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
- IS_BROADWELL(dev)) {
+ IS_BROADWELL(dev) || IS_GEN9(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1108,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- reqf >>= 24;
- else
- reqf >>= 25;
+ if (IS_GEN9(dev))
+ reqf >>= 23;
+ else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -1126,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1152,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & 0xff00) >> 8);
+ (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1177,19 +1184,25 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1204,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "min GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
@@ -1778,11 +1794,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1793,11 +1810,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1828,18 +1846,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->ips.pwrctx) {
- seq_puts(m, "power context ");
- describe_obj(m, dev_priv->ips.pwrctx);
- seq_putc(m, '\n');
- }
-
- if (dev_priv->ips.renderctx) {
- seq_puts(m, "render context ");
- describe_obj(m, dev_priv->ips.renderctx);
- seq_putc(m, '\n');
- }
-
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (!i915.enable_execlists &&
ctx->legacy_hw_ctx.rcs_state == NULL)
@@ -2183,7 +2189,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2243,6 +2249,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
+ if (!HAS_PSR(dev)) {
+ seq_puts(m, "PSR not supported\n");
+ return 0;
+ }
+
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
@@ -2255,17 +2266,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_PSR(dev)) {
- if (HAS_DDI(dev))
- enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- else {
- for_each_pipe(dev_priv, pipe) {
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
- }
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
}
}
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
@@ -2282,7 +2291,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
yesno((bool)dev_priv->psr.link_standby));
/* CHV PSR has no kind of performance counter */
- if (HAS_PSR(dev) && HAS_DDI(dev)) {
+ if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
@@ -2305,8 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
u8 crc[6];
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.dpms != DRM_MODE_DPMS_ON)
continue;
@@ -2674,7 +2682,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_width, crtc->cursor_height,
+ x, y, crtc->base.cursor->state->crtc_w,
+ crtc->base.cursor->state->crtc_h,
crtc->cursor_addr, yesno(active));
}
@@ -2850,7 +2859,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -2867,6 +2876,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+ struct intel_encoder *intel_encoder;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+
+ for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+ /* Encoder connected on this CRTC */
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ seq_puts(m, "eDP:\n");
+ break;
+ case INTEL_OUTPUT_DSI:
+ seq_puts(m, "DSI:\n");
+ break;
+ case INTEL_OUTPUT_HDMI:
+ seq_puts(m, "HDMI:\n");
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ seq_puts(m, "DP:\n");
+ break;
+ default:
+ seq_printf(m, "Other encoder (id=%d).\n",
+ intel_encoder->type);
+ return;
+ }
+ }
+
+ if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Static");
+ else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Seamless");
+ else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+ seq_puts(m, "\tVBT: DRRS_type: None");
+ else
+ seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+ seq_puts(m, "\n\n");
+
+ if (intel_crtc->config->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* disable_drrs() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported. Print the VBT parameter*/
+ seq_puts(m, "\tDRRS Supported : No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+ if (intel_crtc->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+ }
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -4189,7 +4307,7 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4206,18 +4324,10 @@ i915_max_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4226,10 +4336,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4267,7 +4374,7 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4284,18 +4391,10 @@ i915_min_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4304,10 +4403,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4374,6 +4470,112 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+ if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ seq_printf(m, " Available Slice Total: %u\n",
+ INTEL_INFO(dev)->slice_total);
+ seq_printf(m, " Available Subslice Total: %u\n",
+ INTEL_INFO(dev)->subslice_total);
+ seq_printf(m, " Available Subslice Per Slice: %u\n",
+ INTEL_INFO(dev)->subslice_per_slice);
+ seq_printf(m, " Available EU Total: %u\n",
+ INTEL_INFO(dev)->eu_total);
+ seq_printf(m, " Available EU Per Subslice: %u\n",
+ INTEL_INFO(dev)->eu_per_subslice);
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_eu_pg));
+
+ seq_puts(m, "SSEU Device Status\n");
+ if (IS_CHERRYVIEW(dev)) {
+ const int ss_max = 2;
+ int ss;
+ u32 sig1[ss_max], sig2[ss_max];
+
+ sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+ sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+ sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+ sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ s_tot = 1;
+ ss_per++;
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ ss_tot = ss_per;
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4;
+ int s, ss;
+ u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+ s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+ s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+ s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+ eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+ eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+ eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+ eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+ eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+ eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < s_max; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ s_tot++;
+ ss_per = INTEL_INFO(dev)->subslice_per_slice;
+ ss_tot += ss_per;
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+ eu_mask[ss%2]);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ }
+ }
+ seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
+ seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
+ seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
+ seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
+ seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
+
+ return 0;
+}
+
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -4487,6 +4689,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_sseu_status", i915_sseu_status, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
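
The new i915_sseu_status() derives per-subslice EU counts from the power-gating ack registers by masking out the per-EU ack bits and counting them with hweight32(), where each set bit acknowledges one enabled EU pair. The following sketch shows just that counting scheme; the register value, mask, and popcount32() helper are made up for illustration.

    #include <stdio.h>

    /* Stand-in for the kernel's hweight32(): count the set bits. */
    static unsigned int popcount32(unsigned int v)
    {
        unsigned int n = 0;

        while (v) {
            v &= v - 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        /* Hypothetical power-gate ack register and mask: each set bit
         * acks one enabled EU pair, so the subslice EU count is
         * 2 * popcount(reg & mask), as in i915_sseu_status(). */
        unsigned int eu_reg = 0x0000000f;  /* four pairs acked */
        unsigned int eu_mask = 0x000000ff;
        unsigned int eu_cnt = 2 * popcount32(eu_reg & eu_mask);

        printf("enabled EUs in this subslice: %u\n", eu_cnt);  /* 8 */
        return 0;
    }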
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a46787129e7..68e0c85a17cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
@@ -67,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
@@ -149,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -605,16 +619,128 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
}
}
+ /* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev)) {
- u32 fuse, mask_eu;
+ u32 fuse, eu_dis;
fuse = I915_READ(CHV_FUSE_GT);
- mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK |
- CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total = 16 - hweight32(mask_eu);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV expected to always have a uniform distribution of EU
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+ GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT;
+
+ eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+ eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+ eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total *
+ info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ u32 n_disabled;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
+ n_disabled = hweight8(eu_disable[s] >>
+ (ss * eu_max));
+
+ /*
+ * Record which subslice(s) has(have) 7 EUs. we
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_max - n_disabled == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_max - n_disabled;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+ info->has_subslice_pg = 0;
+ info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
}
+ DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+ DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+ info->has_slice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+ info->has_subslice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+ info->has_eu_pg ? "y" : "n");
}
/**
@@ -637,17 +763,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
- /* Refuse to load on gen6+ without kms enabled. */
- if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
- DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
- return -ENODEV;
- }
-
- /* UMS needs agp support. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
- return -EINVAL;
-
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -717,20 +832,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_regs;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_gtt;
- }
+ /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over. */
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
- ret = i915_kick_out_vgacon(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_gtt;
- }
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
}
pci_set_master(dev->pdev);
@@ -834,14 +947,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_power_domains_init(dev_priv);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev);
- if (ret < 0) {
- DRM_ERROR("failed to init modeset\n");
- goto out_power_well;
- }
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_power_well;
}
+ /*
+ * Notify a valid surface after modesetting,
+ * when running inside a VM.
+ */
+ if (intel_vgpu_active(dev))
+ I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
i915_setup_sysfs(dev);
if (INTEL_INFO(dev)->num_pipes) {
@@ -921,28 +1039,25 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_fbdev_fini(dev);
+ intel_fbdev_fini(dev);
drm_vblank_cleanup(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
+ intel_modeset_cleanup(dev);
- vga_switcheroo_unregister_client(dev->pdev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
}
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
@@ -952,17 +1067,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_fini(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Flush any outstanding unpin_work. */
- flush_workqueue(dev_priv->wq);
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
- i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_stolen(dev);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_stolen(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1023,8 +1136,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_preclose(dev, file);
+ intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1087,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
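
With I915_PARAM_SUBSLICE_TOTAL and I915_PARAM_EU_TOTAL wired into i915_getparam() above, userspace can query the GPU topology through the existing GETPARAM ioctl. A hedged usage sketch follows, assuming the libdrm headers are on the include path; the device node path is an example, and the kernel returns -ENODEV when a topology field is unpopulated.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <xf86drm.h>   /* drmIoctl() */
    #include <i915_drm.h>  /* DRM_IOCTL_I915_GETPARAM */

    /* Fetch one i915 parameter; returns -1 if the ioctl fails (for
     * example -ENODEV when the topology field is unpopulated). */
    static int get_i915_param(int fd, int param)
    {
        int value = 0;
        drm_i915_getparam_t gp = { .param = param, .value = &value };

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
            return -1;
        return value;
    }

    int main(void)
    {
        int fd = open("/dev/dri/card0", O_RDWR);  /* example node */

        if (fd < 0)
            return 1;

        printf("subslice total: %d\n",
               get_i915_param(fd, I915_PARAM_SUBSLICE_TOTAL));
        printf("EU total: %d\n",
               get_i915_param(fd, I915_PARAM_EU_TOTAL));
        close(fd);
        return 0;
    }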
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c66b568bb81..c24c3f1ff8a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
};
static const struct intel_device_info intel_cherryview_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -369,6 +368,19 @@ static const struct intel_device_info intel_skylake_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_gt3_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -406,7 +418,9 @@ static const struct intel_device_info intel_skylake_info = {
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info), \
- INTEL_SKL_IDS(&intel_skylake_info)
+ INTEL_SKL_GT1_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT2_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -553,6 +567,7 @@ static int i915_drm_suspend(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
+ int error;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -567,37 +582,32 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(dev->pdev);
- /* If KMS is active, we do the leavevt stuff here */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error;
-
- error = i915_gem_suspend(dev);
- if (error) {
- dev_err(&dev->pdev->dev,
- "GEM idle failed, resume might fail\n");
- return error;
- }
+ error = i915_gem_suspend(dev);
+ if (error) {
+ dev_err(&dev->pdev->dev,
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev);
- /*
- * Disable CRTCs directly since we want to preserve sw state
- * for _thaw. Also, power gate the CRTC power wells.
- */
- drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc)
- intel_crtc_control(crtc, false);
- drm_modeset_unlock_all(dev);
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw. Also, power gate the CRTC power wells.
+ */
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev);
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_hpd_cancel_work(dev_priv);
- intel_suspend_encoders(dev_priv);
+ intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
- }
+ intel_suspend_hw(dev);
i915_gem_suspend_gtt_mappings(dev);
@@ -679,53 +689,48 @@ static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
intel_opregion_setup(dev);
- /* KMS EnterVT equivalent */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_init_pch_refclk(dev);
- drm_mode_config_reset(dev);
+ intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
- }
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (i915_gem_init_hw(dev)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ }
+ mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- intel_runtime_pm_enable_interrupts(dev_priv);
+ /* We need working interrupts for modeset enabling ... */
+ intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
- drm_modeset_unlock_all(dev);
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev);
- /*
- * ... but also need to make sure that hotplug processing
- * doesn't cause havoc. Like in the driver load code we don't
- * bother with the tiny race here where we might loose hotplug
- * notifications.
- * */
- intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
- }
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might loose hotplug
+ * notifications.
+ * */
+ intel_hpd_init(dev_priv);
+ /* Config may have changed between suspend and resume */
+ drm_helper_hpd_irq_event(dev);
intel_opregion_init(dev);
@@ -861,38 +866,29 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
- ret = i915_gem_init_hw(dev);
+ /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+ dev_priv->gpu_error.reload_in_reset = true;
- dev_priv->gpu_error.reload_in_reset = false;
+ ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
- }
+ dev_priv->gpu_error.reload_in_reset = false;
- /*
- * FIXME: This races pretty badly against concurrent holders of
- * ring interrupts. This is possible since we've started to drop
- * dev->struct_mutex in select places when waiting for the gpu.
- */
-
- /*
- * rps/rc6 re-init is necessary to restore state lost after the
- * reset and the re-install of gt irqs. Skip for ironlake per
- * previous concerns that it doesn't respond well to some forms
- * of re-init after reset.
- */
- if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
- } else {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
}
+ /*
+ * rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of gt irqs. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset.
+ */
+ if (INTEL_INFO(dev)->gen > 5)
+ intel_enable_gt_powersave(dev);
+
return 0;
}
@@ -1640,11 +1636,9 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET)) {
driver.get_vblank_timestamp = NULL;
-#ifndef CONFIG_DRM_I915_UMS
/* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
-#endif
}
/*
@@ -1660,10 +1654,8 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
-#ifndef CONFIG_DRM_I915_UMS
if (!(driver.driver_features & DRIVER_MODESET))
return; /* Never loaded a driver. */
-#endif
drm_pci_exit(&driver, &i915_pci_driver);
}
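
The new Skylake GT3 entry above follows the usual pciidlist pattern: each PCI ID macro expands to table entries whose driver data points at an intel_device_info, so per-SKU capabilities (GT3's second BSD ring, for instance) are picked up at probe time. Here is a simplified, userspace-compilable sketch of that table-driven lookup; the device IDs and struct fields are placeholders, not real Skylake IDs.

    #include <stdio.h>

    /* Cut-down stand-in for intel_device_info. */
    struct device_info {
        const char *name;
        int gen;
        int has_bsd2;
    };

    static const struct device_info skylake_info     = { "skylake",     9, 0 };
    static const struct device_info skylake_gt3_info = { "skylake gt3", 9, 1 };

    /* Each PCI device ID carries a pointer to its info as driver data,
     * like the driver_data field in the kernel's pciidlist. */
    struct pci_id_entry {
        unsigned short device;
        const struct device_info *info;
    };

    static const struct pci_id_entry pciidlist[] = {
        { 0x1111, &skylake_info },      /* hypothetical GT1/GT2 ID */
        { 0x2222, &skylake_gt3_info },  /* hypothetical GT3 ID */
    };

    static const struct device_info *lookup(unsigned short device)
    {
        for (unsigned int i = 0;
             i < sizeof(pciidlist) / sizeof(pciidlist[0]); i++)
            if (pciidlist[i].device == device)
                return pciidlist[i].info;
        return NULL;
    }

    int main(void)
    {
        const struct device_info *info = lookup(0x2222);

        if (info)
            printf("%s: gen %d, bsd2=%d\n",
                   info->name, info->gen, info->has_bsd2);
        return 0;
    }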
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b4faa2df9d3d..8ae6f7f06b3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include <uapi/drm/i915_drm.h>
+#include <uapi/drm/drm_fourcc.h>
#include "i915_reg.h"
#include "intel_bios.h"
@@ -55,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150130"
+#define DRIVER_DATE "20150327"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -69,6 +70,9 @@
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
@@ -222,9 +226,14 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_plane(pipe, p) \
- for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
-#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -237,6 +246,12 @@ enum hpd_pin {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_connector(dev, intel_connector) \
+ list_for_each_entry(intel_connector, \
+ &dev->mode_config.connector_list, \
+ base.head)
+
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -412,6 +427,8 @@ struct drm_i915_error_state {
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
@@ -529,7 +546,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -538,7 +555,7 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_device *dev);
+ void (*modeset_global_resources)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -692,7 +709,18 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
- unsigned int eu_total;
+
+ /* Slice/subslice/EU info */
+ u8 slice_total;
+ u8 subslice_total;
+ u8 subslice_per_slice;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
};
#undef DEFINE_FLAG
@@ -771,11 +799,20 @@ struct intel_context {
struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
struct i915_fbc {
- unsigned long size;
+ unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
- enum plane plane;
+ unsigned int possible_framebuffer_bits;
+ unsigned int busy_bits;
+ struct intel_crtc *crtc;
int y;
struct drm_mm_node compressed_fb;
@@ -787,14 +824,6 @@ struct i915_fbc {
* possible. */
bool enabled;
- /* On gen8 some rings cannont perform fbc clean operation so for now
- * we are doing this on SW with mmio.
- * This variable works in the opposite information direction
- * of ring->fbc_dirty telling software on frontbuffer tracking
- * to perform the cache clean on sw side.
- */
- bool need_sw_cache_clean;
-
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -888,150 +917,21 @@ struct intel_gmbus {
};
struct i915_suspend_saved_registers {
- u8 saveLBB;
- u32 saveDSPACNTR;
- u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 savePIPEACONF;
- u32 savePIPEBCONF;
- u32 savePIPEASRC;
- u32 savePIPEBSRC;
- u32 saveFPA0;
- u32 saveFPA1;
- u32 saveDPLL_A;
- u32 saveDPLL_A_MD;
- u32 saveHTOTAL_A;
- u32 saveHBLANK_A;
- u32 saveHSYNC_A;
- u32 saveVTOTAL_A;
- u32 saveVBLANK_A;
- u32 saveVSYNC_A;
- u32 saveBCLRPAT_A;
- u32 saveTRANSACONF;
- u32 saveTRANS_HTOTAL_A;
- u32 saveTRANS_HBLANK_A;
- u32 saveTRANS_HSYNC_A;
- u32 saveTRANS_VTOTAL_A;
- u32 saveTRANS_VBLANK_A;
- u32 saveTRANS_VSYNC_A;
- u32 savePIPEASTAT;
- u32 saveDSPASTRIDE;
- u32 saveDSPASIZE;
- u32 saveDSPAPOS;
- u32 saveDSPAADDR;
- u32 saveDSPASURF;
- u32 saveDSPATILEOFF;
- u32 savePFIT_PGM_RATIOS;
- u32 saveBLC_HIST_CTL;
- u32 saveBLC_PWM_CTL;
- u32 saveBLC_PWM_CTL2;
- u32 saveBLC_CPU_PWM_CTL;
- u32 saveBLC_CPU_PWM_CTL2;
- u32 saveFPB0;
- u32 saveFPB1;
- u32 saveDPLL_B;
- u32 saveDPLL_B_MD;
- u32 saveHTOTAL_B;
- u32 saveHBLANK_B;
- u32 saveHSYNC_B;
- u32 saveVTOTAL_B;
- u32 saveVBLANK_B;
- u32 saveVSYNC_B;
- u32 saveBCLRPAT_B;
- u32 saveTRANSBCONF;
- u32 saveTRANS_HTOTAL_B;
- u32 saveTRANS_HBLANK_B;
- u32 saveTRANS_HSYNC_B;
- u32 saveTRANS_VTOTAL_B;
- u32 saveTRANS_VBLANK_B;
- u32 saveTRANS_VSYNC_B;
- u32 savePIPEBSTAT;
- u32 saveDSPBSTRIDE;
- u32 saveDSPBSIZE;
- u32 saveDSPBPOS;
- u32 saveDSPBADDR;
- u32 saveDSPBSURF;
- u32 saveDSPBTILEOFF;
- u32 saveVGA0;
- u32 saveVGA1;
- u32 saveVGA_PD;
- u32 saveVGACNTRL;
- u32 saveADPA;
u32 saveLVDS;
u32 savePP_ON_DELAYS;
u32 savePP_OFF_DELAYS;
- u32 saveDVOA;
- u32 saveDVOB;
- u32 saveDVOC;
u32 savePP_ON;
u32 savePP_OFF;
u32 savePP_CONTROL;
u32 savePP_DIVISOR;
- u32 savePFIT_CONTROL;
- u32 save_palette_a[256];
- u32 save_palette_b[256];
u32 saveFBC_CONTROL;
- u32 saveIER;
- u32 saveIIR;
- u32 saveIMR;
- u32 saveDEIER;
- u32 saveDEIMR;
- u32 saveGTIER;
- u32 saveGTIMR;
- u32 saveFDI_RXA_IMR;
- u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
- u8 saveMSR;
- u8 saveSR[8];
- u8 saveGR[25];
- u8 saveAR_INDEX;
- u8 saveAR[21];
- u8 saveDACMASK;
- u8 saveCR[37];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
- u32 saveCURACNTR;
- u32 saveCURAPOS;
- u32 saveCURABASE;
- u32 saveCURBCNTR;
- u32 saveCURBPOS;
- u32 saveCURBBASE;
- u32 saveCURSIZE;
- u32 saveDP_B;
- u32 saveDP_C;
- u32 saveDP_D;
- u32 savePIPEA_GMCH_DATA_M;
- u32 savePIPEB_GMCH_DATA_M;
- u32 savePIPEA_GMCH_DATA_N;
- u32 savePIPEB_GMCH_DATA_N;
- u32 savePIPEA_DP_LINK_M;
- u32 savePIPEB_DP_LINK_M;
- u32 savePIPEA_DP_LINK_N;
- u32 savePIPEB_DP_LINK_N;
- u32 saveFDI_RXA_CTL;
- u32 saveFDI_TXA_CTL;
- u32 saveFDI_RXB_CTL;
- u32 saveFDI_TXB_CTL;
- u32 savePFA_CTL_1;
- u32 savePFB_CTL_1;
- u32 savePFA_WIN_SZ;
- u32 savePFB_WIN_SZ;
- u32 savePFA_WIN_POS;
- u32 savePFB_WIN_POS;
- u32 savePCH_DREF_CONTROL;
- u32 saveDISP_ARB_CTL;
- u32 savePIPEA_DATA_M1;
- u32 savePIPEA_DATA_N1;
- u32 savePIPEA_LINK_M1;
- u32 savePIPEA_LINK_N1;
- u32 savePIPEB_DATA_M1;
- u32 savePIPEB_DATA_N1;
- u32 savePIPEB_LINK_M1;
- u32 savePIPEB_LINK_N1;
- u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -1128,13 +1028,12 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
- u32 ei_interrupt_count;
-
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -1171,9 +1070,6 @@ struct intel_ilk_power_mgmt {
int c_m;
int r_t;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
};
struct drm_i915_private;
@@ -1455,6 +1351,7 @@ struct intel_vbt_data {
bool edp_initialized;
bool edp_support;
int edp_bpp;
+ bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
@@ -1515,6 +1412,25 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct vlv_wm_values {
+ struct {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+ } pipe[3];
+
+ struct {
+ uint16_t plane;
+ uint8_t cursor;
+ } sr;
+
+ struct {
+ uint8_t cursor;
+ uint8_t sprite[2];
+ uint8_t primary;
+ } ddl[3];
+};
+
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
@@ -1641,6 +1557,10 @@ struct i915_workarounds {
u32 count;
};
+struct i915_virtual_gpu {
+ bool active;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1653,6 +1573,8 @@ struct drm_i915_private {
struct intel_uncore uncore;
+ struct i915_virtual_gpu vgpu;
+
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1871,6 +1793,7 @@ struct drm_i915_private {
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
+ struct vlv_wm_values vlv;
};
} wm;
@@ -2142,7 +2065,7 @@ struct drm_i915_gem_request {
u32 tail;
/**
- * Context related to this request
+ * Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
@@ -2152,6 +2075,7 @@ struct drm_i915_gem_request {
* context.
*/
struct intel_context *ctx;
+ struct intel_ringbuffer *ringbuf;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
@@ -2166,6 +2090,9 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
+ /** process identifier submitting this request */
+ struct pid *pid;
+
uint32_t uniq;
/**
@@ -2352,6 +2279,7 @@ struct drm_i915_cmd_table {
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
@@ -2374,9 +2302,6 @@ struct drm_i915_cmd_table {
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
-#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
- INTEL_DEVID(dev) == 0x0106 || \
- INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
@@ -2400,6 +2325,12 @@ struct drm_i915_cmd_table {
INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define SKL_REVID_A0 (0x0)
+#define SKL_REVID_B0 (0x1)
+#define SKL_REVID_C0 (0x2)
+#define SKL_REVID_D0 (0x3)
+#define SKL_REVID_E0 (0x4)
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2499,6 +2430,7 @@ struct drm_i915_cmd_table {
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
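
GT_FREQUENCY_MULTIPLIER converts RP ratio values to MHz; Gen9 reports ratios at a finer granularity, hence the extra scaler. A hedged sketch of the conversion these constants imply, ignoring the Valleyview/Cherryview byte-clock paths:

static int gpu_freq_mhz_sketch(struct drm_device *dev, int ratio)
{
	if (INTEL_INFO(dev)->gen >= 9)
		return ratio * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER;

	return ratio * GT_FREQUENCY_MULTIPLIER;
}
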
#include "i915_trace.h"
@@ -2507,14 +2439,11 @@ extern int i915_max_ioctl;
extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
extern int i915_resume_legacy(struct drm_device *dev);
-extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
- unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
@@ -2534,11 +2463,12 @@ struct i915_params {
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
+ bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
- bool mmio_debug;
+ int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
};
@@ -2591,6 +2521,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+ return to_i915(dev)->vgpu.active;
+}
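
Callers gate paravirtualization quirks on this predicate; later hunks in this patch use it to disable full PPGTT and to read the ballooned fence count. A minimal sketch of the pattern, guarding a hypothetical feature:

static int vgpu_feature_guard_sketch(struct drm_device *dev)
{
	/* Running under a mediated vGPU: degrade or skip the feature. */
	if (intel_vgpu_active(dev))
		return -ENODEV;

	return 0;
}
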
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2669,12 +2603,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target,
- unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2691,20 +2619,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
-{
- return i915_gem_object_pin_view(obj, vm, alignment, flags,
- &i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
@@ -2844,8 +2768,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2868,60 +2794,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
- return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}
+
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+ struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
- &i915_ggtt_view_normal);
-}
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->pin_count > 0)
- return true;
- return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
@@ -2944,13 +2856,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+ return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
static inline unsigned long
@@ -2974,7 +2880,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
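
Pinning and unpinning must agree on the view: vmas are looked up by (object, view) pair, so unpinning with a different view would decrement the wrong vma's pin_count. A minimal sketch of the symmetric usage, with the actual use of the mapping elided:

static int ggtt_pin_view_sketch(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	int ret;

	ret = i915_gem_object_ggtt_pin(obj, view, 0, 0);
	if (ret)
		return ret;

	/* ... use the GGTT mapping for this view ... */

	i915_gem_object_ggtt_unpin_view(obj, view);
	return 0;
}
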
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3046,6 +2958,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target,
+ unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
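
The shrink flags compose, and callers typically make a gentle pass before an aggressive one. A sketch mirroring the two-pass policy of the shrinker scan callback that this patch relocates into i915_gem_shrinker.c:

static unsigned long shrink_two_pass_sketch(struct drm_i915_private *dev_priv,
					    unsigned long nr_to_scan)
{
	unsigned long freed;

	/* First drop only purgeable (madvised DONTNEED) objects... */
	freed = i915_gem_shrink(dev_priv, nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	/* ...then fall back to anything that can be unbound. */
	if (freed < nr_to_scan)
		freed += i915_gem_shrink(dev_priv, nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	return freed;
}
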
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -3121,10 +3044,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
@@ -3196,8 +3115,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
@@ -3210,8 +3128,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
-
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 27ea6bdebce7..d07c0b1fb498 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,9 +29,9 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -52,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
- unsigned long event,
- void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
@@ -350,7 +341,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
- int ret;
+ int ret = 0;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -359,6 +350,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -369,13 +361,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out;
+ }
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
- return 0;
+
+out:
+ intel_fb_obj_flush(obj, false);
+ return ret;
}
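
The same invalidate/flush bracket is applied to every pwrite path in this patch: frontbuffer tracking is notified before the CPU touches the pages and flushed on every exit, error paths included. Schematically, with write_back() as a hypothetical stand-in for the copy loop:

static int pwrite_bracket_sketch(struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_pwrite *args)
{
	int ret;

	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); /* before any write */

	ret = write_back(obj, args);	/* placeholder for the copy loop */

	intel_fb_obj_flush(obj, false);	/* on success and failure alike */
	return ret;
}
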
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -809,6 +806,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
while (remain > 0) {
/* Operation in this page
*
@@ -829,7 +828,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
- goto out_unpin;
+ goto out_flush;
}
remain -= page_length;
@@ -837,6 +836,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset += page_length;
}
+out_flush:
+ intel_fb_obj_flush(obj, false);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -951,6 +952,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
i915_gem_object_pin_pages(obj);
offset = args->offset;
@@ -1029,6 +1032,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
+ intel_fb_obj_flush(obj, false);
return ret;
}
@@ -1922,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2033,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target, unsigned flags)
-{
- const struct {
- struct list_head *list;
- unsigned int bit;
- } phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
- { NULL, 0 },
- }, *phase;
- unsigned long count = 0;
-
- /*
- * As we may completely rewrite the (un)bound list whilst unbinding
- * (due to retiring requests) we have to strictly process only
- * one element of the list at the time, and recheck the list
- * on every iteration.
- *
- * In particular, we must hold a reference whilst removing the
- * object as we may end up waiting for and/or retiring the objects.
- * This might release the final reference (held by the active list)
- * and result in the object being freed from under us. This is
- * similar to the precautions the eviction code must take whilst
- * removing objects.
- *
- * Also note that although these lists do not hold a reference to
- * the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
- for (phase = phases; phase->list; phase++) {
- struct list_head still_in_list;
-
- if ((flags & phase->bit) == 0)
- continue;
-
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (flags & I915_SHRINK_PURGEABLE &&
- !i915_gem_object_is_purgeable(obj))
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- /* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, phase->list);
- }
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
- i915_gem_evict_everything(dev_priv->dev);
- return i915_gem_shrink(dev_priv, LONG_MAX,
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2492,6 +2411,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
+
+ request->pid = get_pid(task_pid(current));
}
trace_i915_gem_request_add(request);
@@ -2572,6 +2493,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ put_pid(request->pid);
+
i915_gem_request_unreference(request);
}
@@ -2744,7 +2667,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
@@ -2755,23 +2677,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
trace_i915_gem_request_retire(request);
- /* This is one of the few common intersection points
- * between legacy ringbuffer submission and execlists:
- * we need to tell them apart in order to find the correct
- * ringbuffer to which the request belongs to.
- */
- if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
-
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->postfix;
+ request->ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
@@ -3516,9 +3427,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3530,6 +3441,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3568,7 +3482,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+ vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+ i915_gem_obj_lookup_or_create_vma(obj, vm);
+
if (IS_ERR(vma))
goto err_unpin;
@@ -3598,6 +3514,17 @@ search_free:
if (ret)
goto err_remove_node;
+ /* allocate before insert / bind */
+ if (vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+ VM_TO_TRACE_NAME(vma->vm));
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ goto err_remove_node;
+ }
+
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level,
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3768,7 +3695,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3950,7 +3877,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
@@ -3986,7 +3914,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+ view->type == I915_GGTT_VIEW_NORMAL ?
+ PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
@@ -4014,9 +3944,11 @@ err_unpin_display:
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, view);
+
obj->pin_display = is_pin_display(obj);
}
@@ -4083,7 +4015,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4165,12 +4097,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
+ uint32_t alignment,
+ uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4186,17 +4118,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return -EINVAL;
+
+ vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+ i915_gem_obj_to_vma(obj, vm);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_vma_misplaced(vma, alignment, flags)) {
+ unsigned long offset;
+
+ offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+ i915_gem_obj_offset(obj, vm);
WARN(vma->pin_count,
- "bo is already pinned with incorrect alignment:"
+ "bo is already pinned in %s with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset_view(obj, vm, view->type),
+ ggtt_view ? "ggtt" : "ppgtt",
+ offset,
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
@@ -4210,8 +4154,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
- flags, view);
+ /* In true PPGTT, bind has possibly changed PDEs, which
+ * means we must do a context switch before the GPU can
+ * accurately read some of the VMAs.
+ */
+ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
@@ -4237,7 +4185,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + obj->base.size <=
+ mappable = (vma->node.start + fence_size <=
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
@@ -4252,16 +4200,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
return 0;
}
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ return i915_gem_object_do_pin(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+ alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ if (WARN_ONCE(!view, "no view specified"))
+ return -EINVAL;
+
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ alignment, flags | PIN_GLOBAL);
+}
+
void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
- BUG_ON(vma->pin_count == 0);
- BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+ WARN_ON(vma->pin_count == 0);
+ WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0)
+ if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
obj->pin_mappable = false;
}
@@ -4382,7 +4355,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+ if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4557,15 +4530,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->ggtt_view.type == view->type)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
return vma;
+ }
+ return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_vma *vma;
+
+ if (WARN_ONCE(!view, "no view specified"))
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma;
return NULL;
}
@@ -4612,10 +4603,6 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev);
- /* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev);
-
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
@@ -4986,18 +4973,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- }
-
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /* Old X drivers will take 0-2 for front, back, depth buffers */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->fence_reg_start = 3;
-
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
@@ -5005,6 +4982,10 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
/* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev);
@@ -5014,13 +4995,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
-
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_shrinker_init(dev_priv);
i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
@@ -5112,106 +5087,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- *unlock = false;
- } else
- *unlock = true;
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
- return true;
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
+ return -1;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- int count = 0;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- count++;
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return 0;
-
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
- count += obj->base.size >> PAGE_SHIFT;
- }
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma->node.start;
- return count;
+ WARN(1, "global vma for this object not found.\n");
+ return -1;
}
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm && vma->ggtt_view.type == view)
- return vma->node.start;
-
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
}
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
+
+ return false;
}
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm &&
- vma->ggtt_view.type == view &&
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5239,118 +5178,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
if (vma->vm == vm)
return vma->node.size;
-
+ }
return 0;
}
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- unsigned long freed;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
-
- freed = i915_gem_shrink(dev_priv,
- sc->nr_to_scan,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
- bool was_interruptible;
- bool unlock;
-
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
- }
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- freed_pages = i915_gem_shrink_all(dev_priv);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- /* Because we may be allocating inside our own driver, we cannot
- * assert that there are no objects with pinned pages that are not
- * being pointed to by hardware.
- */
- unbound = bound = pinned = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- unbound += obj->base.size;
- }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- bound += obj->base.size;
+ if (vma->pin_count > 0)
+ return true;
}
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
- if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
-
- *(unsigned long *)ptr += freed_pages;
- return NOTIFY_DONE;
+ return false;
}
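
The same filter recurs in each of the rewritten lookup helpers above: a GGTT vma only matches a plain address-space query when it carries the normal view. The predicate being open-coded each time, as a sketch:

static bool vma_matches_vm_sketch(struct i915_vma *vma,
				  struct i915_address_space *vm)
{
	/* Non-normal GGTT views (e.g. rotated) never satisfy a plain
	 * address-space lookup. */
	if (i915_is_ggtt(vma->vm) &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
		return false;

	return vma->vm == vm;
}
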
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma;
-
- return NULL;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8603bf48d3ee..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* In execlists mode we will unreference the context when the execlist
- * queue is cleared and the requests destroyed.
- */
- if (i915.enable_execlists)
+ if (i915.enable_execlists) {
+ struct intel_context *ctx;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ intel_lr_context_reset(dev, ctx);
+ }
+
return;
+ }
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
@@ -565,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
return ret;
}
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+ struct intel_context *from,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (to->remap_slice)
+ return false;
+
+ if (to->ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &to->ppgtt->pd_dirty_rings))
+ return true;
+ } else if (dev_priv->mm.aliasing_ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (INTEL_INFO(ring->dev)->gen < 8)
+ return true;
+
+ if (ring != &dev_priv->ring[RCS])
+ return true;
+
+ return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+ u32 hw_flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (!IS_GEN8(ring->dev))
+ return false;
+
+ if (ring != &dev_priv->ring[RCS])
+ return false;
+
+ if (hw_flags & MI_RESTORE_INHIBIT)
+ return true;
+
+ return false;
+}
+
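
The two predicates above are easiest to read side by side as a decision table; do_switch() below WARNs if both ever fire for the same switch:

/*
 * gen < 8, any ring                -> load page directories before
 *                                     MI_SET_CONTEXT
 * gen8, non-render ring            -> load before MI_SET_CONTEXT
 * gen8, render ring, restore
 *   inhibited (uninitialized ctx)  -> load after MI_SET_CONTEXT
 * gen8, render ring, otherwise     -> no explicit load (PDPs already
 *                                     set up)
 */
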
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
@@ -580,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && !to->remap_slice)
+ if (should_skip_switch(ring, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
@@ -598,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
- if (to->ppgtt) {
+ if (needs_pd_load_pre(ring, to)) {
+ /* Older GENs and non-render rings still want the load first:
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context." */
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
+
+ /* Doing a PD load always reloads the page dirs */
+ clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -633,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+ * space. This means we must enforce that a page table load
+ * occurs in this case. */
+ } else if (to->ppgtt &&
+ test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ hw_flags |= MI_FORCE_RESTORE;
+
+ /* We should never emit switch_mm more than once */
+ WARN_ON(needs_pd_load_pre(ring, to) &&
+ needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
if (ret)
goto unpin_out;
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * set up, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(ring, to, hw_flags)) {
+ trace_switch_mm(ring, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret) {
+ DRM_ERROR("Failed to change address space on context switch\n");
+ goto unpin_out;
+ }
+ }
+
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
@@ -677,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
- uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
done:
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82a1f4b57778..7998da27c500 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -230,6 +230,13 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &i915_dmabuf_ops;
+ exp_info.size = gem_obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem_obj;
+
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -237,8 +244,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
- NULL);
+ return dma_buf_export(&exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e3a49d94da3a..d09e35ed9c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
*
* This function is used by the object/vma binding code.
*
+ * Since this function is only used to free up virtual address space it only
+ * ignores pinned vmas, and not objects where the backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 38a742532c4f..a3190e793ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
- !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+ /* This is not a fast path, so KISS. */
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+ *(uint32_t *)addr = value;
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = (int)reloc->delta + target_offset;
+ char *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+ }
+
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
@@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc, target_offset);
- else
+ else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
+ else if (cpu_has_clflush)
+ ret = relocate_entry_clflush(obj, reloc, target_offset);
+ else {
+ WARN_ONCE(1, "Impossible case in relocation handling\n");
+ ret = -ENODEV;
+ }
if (ret)
return ret;
@@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
return ret;
}
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+ return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+ __EXEC_OBJECT_NEEDS_MAP;
+}
+
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
@@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
int ret;
flags = 0;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_GLOBAL | PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
- if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
- flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ }
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+ if ((ret == -ENOSPC || ret == -E2BIG) &&
+ only_mappable_for_reloc(entry->flags))
+ ret = i915_gem_object_pin(obj, vma->vm,
+ entry->alignment,
+ flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
if (ret)
return ret;
@@ -605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma)
vma->node.start & (entry->alignment - 1))
return true;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
- return true;
-
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
+ /* avoid costly ping-pong once a batch bo has ended up non-mappable */
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ return !only_mappable_for_reloc(entry->flags);
+
return false;
}
@@ -971,7 +1035,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1076,16 +1140,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
u32 batch_len,
- bool is_master,
- u32 *flags)
+ bool is_master)
{
struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
- bool need_reloc = false;
+ struct i915_vma *vma;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
- batch_obj->base.size);
+ PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
@@ -1095,40 +1158,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
batch_start_offset,
batch_len,
is_master);
- if (ret) {
- if (ret == -EACCES)
- return batch_obj;
- } else {
- struct i915_vma *vma;
+ if (ret)
+ goto err;
- memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+ if (ret)
+ goto err;
- vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
- vma->exec_entry = shadow_exec_entry;
- vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
- drm_gem_object_reference(&shadow_batch_obj->base);
- i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
- list_add_tail(&vma->exec_list, &eb->vmas);
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
- shadow_batch_obj->base.pending_read_domains =
- batch_obj->base.pending_read_domains;
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ list_add_tail(&vma->exec_list, &eb->vmas);
- /*
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically
- * don't want that set when the command parser is
- * enabled.
- *
- * FIXME: with aliasing ppgtt, buffers that should only
- * be in ggtt still end up in the aliasing ppgtt. remove
- * this check when that is fixed.
- */
- if (USES_FULL_PPGTT(dev))
- *flags |= I915_DISPATCH_SECURE;
- }
+ shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+ return shadow_batch_obj;
- return ret ? ERR_PTR(ret) : shadow_batch_obj;
+err:
+ if (ret == -EACCES) /* unhandled chained batch */
+ return batch_obj;
+ else
+ return ERR_PTR(ret);
}
int
@@ -1138,7 +1191,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1198,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
+ if (ctx->ppgtt)
+ WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ "%s didn't clear reload\n", ring->name);
+ else if (dev_priv->mm.aliasing_ppgtt)
+ WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+ (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
@@ -1266,19 +1326,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
return ret;
}
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1413,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
- u32 flags;
+ u32 dispatch_flags;
int ret;
bool need_relocs;
@@ -1364,15 +1424,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
- flags = 0;
+ dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
- flags |= I915_DISPATCH_SECURE;
+ dispatch_flags |= I915_DISPATCH_SECURE;
}
if (args->flags & I915_EXEC_IS_PINNED)
- flags |= I915_DISPATCH_PINNED;
+ dispatch_flags |= I915_DISPATCH_PINNED;
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1554,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
args->batch_len,
- file->is_master,
- &flags);
+ file->is_master);
if (IS_ERR(batch_obj)) {
ret = PTR_ERR(batch_obj);
goto err;
}
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ dispatch_flags |= I915_DISPATCH_SECURE;
+
+ exec_start = 0;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,14 +1582,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE) {
+ if (dispatch_flags & I915_DISPATCH_SECURE) {
/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
- * so we don't really have issues with mutliple objects not
+ * so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
@@ -1527,7 +1602,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start, flags);
+ &eb->vmas, batch_obj, exec_start,
+ dispatch_flags);
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1611,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* needs to be adjusted to also track the ggtt batch vma properly as
* active.
*/
- if (flags & I915_DISPATCH_SECURE)
+ if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
err:
/* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dccdc8aad2e2..0239fbff7bf7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -66,8 +67,9 @@
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with a _ggtt_ infix, and sometimes with a _view suffix, to avoid
+ * renaming large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -91,6 +93,9 @@
*/
const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+ .type = I915_GGTT_VIEW_ROTATED
+};
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -103,6 +108,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (intel_vgpu_active(dev))
+ has_full_ppgtt = false; /* emulation is too hard */
+
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -138,17 +146,16 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_aliasing_ppgtt ? 1 : 0;
}
-
static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
-static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
{
- gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
switch (level) {
@@ -166,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
- gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
@@ -179,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
return pde;
}
-static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -201,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -225,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags)
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -241,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -254,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -275,6 +282,162 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+#define i915_dma_unmap_single(px, dev) \
+ __i915_dma_unmap_single((px)->daddr, dev)
+
+static inline void __i915_dma_unmap_single(dma_addr_t daddr,
+ struct drm_device *dev)
+{
+ struct device *device = &dev->pdev->dev;
+
+ dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+}
+
+/**
+ * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
+ * @px: Page table/dir/etc to get a DMA map for
+ * @dev: drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
+ *
+ * Return: 0 on success.
+ */
+#define i915_dma_map_single(px, dev) \
+ i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+
+static inline int i915_dma_map_page_single(struct page *page,
+ struct drm_device *dev,
+ dma_addr_t *daddr)
+{
+ struct device *device = &dev->pdev->dev;
+
+ *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, *daddr))
+ return -ENOMEM;
+
+ return 0;
+}
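This works because every paging structure introduced by this patch carries the same pair of fields, a backing page and its DMA address, so the single macro serves every level. A minimal sketch of the pattern, using the struct fields added in i915_gem_gtt.h later in this patch:

	int ret;

	/* pt is a struct i915_page_table_entry *, pd a struct
	 * i915_page_directory_entry *; both carry .page and .daddr, which is
	 * all the macro touches, so one helper covers either type:
	 */
	ret = i915_dma_map_single(pt, dev);   /* maps pt->page, fills pt->daddr */
	if (ret)
		return ret;
	ret = i915_dma_map_single(pd, dev);   /* same expansion, different type */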
+
+static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+ struct drm_device *dev)
+{
+ if (WARN_ON(!pt->page))
+ return;
+
+ i915_dma_unmap_single(pt, dev);
+ __free_page(pt->page);
+ kfree(pt->used_ptes);
+ kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+ struct i915_page_table_entry *pt;
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES : GEN6_PTES;
+ int ret = -ENOMEM;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return ERR_PTR(-ENOMEM);
+
+ pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+ GFP_KERNEL);
+
+ if (!pt->used_ptes)
+ goto fail_bitmap;
+
+ pt->page = alloc_page(GFP_KERNEL);
+ if (!pt->page)
+ goto fail_page;
+
+ ret = i915_dma_map_single(pt, dev);
+ if (ret)
+ goto fail_dma;
+
+ return pt;
+
+fail_dma:
+ __free_page(pt->page);
+fail_page:
+ kfree(pt->used_ptes);
+fail_bitmap:
+ kfree(pt);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd: The page directory which will have at least @count entries
+ * available to point to the allocated page tables.
+ * @pde: First page directory entry for which we are allocating.
+ * @count: Number of pages to allocate.
+ * @dev: DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. Function cleans up after
+ * itself on any failure.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+ struct drm_device *dev)
+{
+ int i, ret;
+
+	/* 512 is the max number of page tables per page directory on any platform. */
+ if (WARN_ON(pde + count > I915_PDES))
+ return -EINVAL;
+
+ for (i = pde; i < pde + count; i++) {
+ struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto err_out;
+ }
+ WARN(pd->page_table[i],
+ "Leaking page directory entry %d (%p)\n",
+ i, pd->page_table[i]);
+ pd->page_table[i] = pt;
+ }
+
+ return 0;
+
+err_out:
+ while (i-- > pde)
+ unmap_and_free_pt(pd->page_table[i], dev);
+ return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+ if (pd->page) {
+ __free_page(pd->page);
+ kfree(pd);
+ }
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+ struct i915_page_directory_entry *pd;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pd->page) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val)
@@ -304,10 +467,10 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int i, ret;
/* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+ int used_pd = ppgtt->num_pd_entries / I915_PDES;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
@@ -323,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen8_pte_t *pt_vaddr, scratch_pte;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -334,11 +497,28 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+ struct i915_page_directory_entry *pd;
+ struct i915_page_table_entry *pt;
+ struct page *page_table;
+
+ if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+ continue;
+
+ pd = ppgtt->pdp.page_directory[pdpe];
+
+ if (WARN_ON(!pd->page_table[pde]))
+ continue;
+
+ pt = pd->page_table[pde];
+
+ if (WARN_ON(!pt->page))
+ continue;
+
+ page_table = pt->page;
last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES_PER_PAGE)
- last_pte = GEN8_PTES_PER_PAGE;
+ if (last_pte > GEN8_PTES)
+ last_pte = GEN8_PTES;
pt_vaddr = kmap_atomic(page_table);
@@ -352,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
pte = 0;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
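For orientation, the pdpe/pde/pte trio above is a plain shift-and-mask decomposition of the GTT offset. A worked sketch, assuming the usual GEN8 shift values (PDPE at bit 30, PDE at bit 21, PTE at bit 12; only the PDE/PTE masks appear in this patch, so treat the PDPE numbers as an assumption):

	/* Hypothetical offset: 3 GiB + 6 MiB + 20 KiB into the address space */
	uint64_t start = (3ULL << 30) + (6ULL << 20) + (20ULL << 10);

	unsigned pdpe = (start >> 30) & 0x3;    /* = 3: fourth page directory */
	unsigned pde  = (start >> 21) & 0x1ff;  /* = 3: 6 MiB / 2 MiB per PDE */
	unsigned pte  = (start >> 12) & 0x1ff;  /* = 5: 20 KiB / 4 KiB per PTE */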
@@ -366,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr;
+ gen8_pte_t *pt_vaddr;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -375,21 +555,26 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
break;
- if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+ if (pt_vaddr == NULL) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+ struct i915_page_table_entry *pt = pd->page_table[pde];
+ struct page *page_table = pt->page;
+
+ pt_vaddr = kmap_atomic(page_table);
+ }
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -403,29 +588,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
{
int i;
- if (pt_pages == NULL)
+ if (!pd->page)
return;
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
- if (pt_pages[i])
- __free_pages(pt_pages[i], 0);
+ for (i = 0; i < I915_PDES; i++) {
+ if (WARN_ON(!pd->page_table[i]))
+ continue;
+
+ unmap_and_free_pt(pd->page_table[i], dev);
+ pd->page_table[i] = NULL;
+ }
}
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_dma_addr[i]);
- }
+ if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+ continue;
- __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+ }
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -436,14 +625,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
- if (!ppgtt->pd_dma_addr[i])
+ if (!ppgtt->pdp.page_directory[i]->daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ struct i915_page_table_entry *pt;
+ dma_addr_t addr;
+
+ if (WARN_ON(!pd->page_table[j]))
+ continue;
+
+ pt = pd->page_table[j];
+ addr = pt->daddr;
+
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -460,86 +658,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
- struct page **pt_pages;
- int i;
-
- pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
- if (!pt_pages)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
- pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!pt_pages[i])
- goto bail;
- }
-
- return pt_pages;
-
-bail:
- gen8_free_page_tables(pt_pages);
- kfree(pt_pages);
- return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- struct page **pt_pages[GEN8_LEGACY_PDPS];
int i, ret;
- for (i = 0; i < max_pdp; i++) {
- pt_pages[i] = __gen8_alloc_page_tables();
- if (IS_ERR(pt_pages[i])) {
- ret = PTR_ERR(pt_pages[i]);
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+ 0, I915_PDES, ppgtt->base.dev);
+ if (ret)
goto unwind_out;
- }
}
- /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
- * "atomic" - for cleanup purposes.
- */
- for (i = 0; i < max_pdp; i++)
- ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
return 0;
unwind_out:
- while (i--) {
- gen8_free_page_tables(pt_pages[i]);
- kfree(pt_pages[i]);
- }
+ while (i--)
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- return ret;
+ return -ENOMEM;
}
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
{
int i;
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- return -ENOMEM;
+ for (i = 0; i < max_pdp; i++) {
+ ppgtt->pdp.page_directory[i] = alloc_pd_single();
+ if (IS_ERR(ppgtt->pdp.page_directory[i]))
+ goto unwind_out;
}
- return 0;
-}
+ ppgtt->num_pd_pages = max_pdp;
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
- if (!ppgtt->pd_pages)
- return -ENOMEM;
+ return 0;
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+unwind_out:
+ while (i--)
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
- return 0;
+ return -ENOMEM;
}
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -551,18 +710,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
- if (ret) {
- __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return ret;
- }
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+ if (ret)
+ goto err_out;
- ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+ ppgtt->num_pd_entries = max_pdp * I915_PDES;
- ret = gen8_ppgtt_allocate_dma(ppgtt);
- if (ret)
- gen8_ppgtt_free(ppgtt);
+ return 0;
+err_out:
+ gen8_ppgtt_free(ppgtt);
return ret;
}
@@ -573,14 +730,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[pd], 0,
+ ppgtt->pdp.page_directory[pd]->page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
if (ret)
return ret;
- ppgtt->pd_dma_addr[pd] = pd_addr;
+ ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
return 0;
}
@@ -590,22 +747,23 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pt)
{
dma_addr_t pt_addr;
- struct page *p;
+ struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+ struct i915_page_table_entry *ptab = pdir->page_table[pt];
+ struct page *p = ptab->page;
int ret;
- p = ppgtt->gen8_pt_pages[pd][pt];
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
if (ret)
return ret;
- ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+ ptab->daddr = pt_addr;
return 0;
}
-/**
+/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
* PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
@@ -618,26 +776,30 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ const int min_pt_pages = I915_PDES * max_pdp;
int i, j, ret;
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
- /* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ /* 1. Do all our allocations for page directories and page tables.
+ * We allocate more than was asked so that we can point the unused parts
+	 * to valid entries that point to the scratch page. Dynamic page tables
+ * will fix this eventually.
+ */
+ ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
if (ret)
return ret;
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
- for (i = 0; i < max_pdp; i++) {
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
if (ret)
goto bail;
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
if (ret)
goto bail;
@@ -652,11 +814,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
- for (i = 0; i < max_pdp; i++) {
- gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ gen8_pde_t *pd_vaddr;
+ pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_table_entry *pt = pd->page_table[j];
+ dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
@@ -670,9 +834,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ /* This is the area that we advertise as usable for the caller */
+ ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
+
+	/* Set all ptes to a valid scratch page, including those above the requested space */
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
+ true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
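To make the sizing concrete, the legacy gen8 layout that the code above now always allocates in full works out as:

	512 PTEs x 4 KiB pages = 2 MiB  per page table (one PDE)
	512 PDEs x 2 MiB       = 1 GiB  per page directory (one PDPE)
	4 PDPEs  x 1 GiB       = 4 GiB  total legacy 32b address space

This is also where the "(%d wasted)" arithmetic comes from: GEN8_LEGACY_PDPES directories are always allocated, but only max_pdp of them are advertised via ppgtt->base.total.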
@@ -691,22 +860,23 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
- gen6_gtt_pte_t __iomem *pd_addr;
- gen6_gtt_pte_t scratch_pte;
+ gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
uint32_t pd_entry;
int pte, pde;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
- ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ ppgtt->pd.pd_offset,
+ ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
- gen6_gtt_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ gen6_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -717,10 +887,10 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
- for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+ for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
- (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pde * PAGE_SIZE * GEN6_PTES) +
(pte * PAGE_SIZE);
int i;
bool found = false;
@@ -743,33 +913,43 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
}
-static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+/* Write the page directory entry @pde in @pd so that it points to page table @pt */
+static void gen6_write_pde(struct i915_page_directory_entry *pd,
+ const int pde, struct i915_page_table_entry *pt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
- gen6_gtt_pte_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
+ /* Caller needs to make sure the write completes if necessary */
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(pd, struct i915_hw_ppgtt, pd);
+ u32 pd_entry;
- WARN_ON(ppgtt->pd_offset & 0x3f);
- pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry |= GEN6_PDE_VALID;
- pt_addr = ppgtt->pt_dma_addr[i];
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
+ writel(pd_entry, ppgtt->pd_addr + pde);
+}
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
+/* Write the page directory entries for every page table that falls within
+ * the given range. */
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+ struct i915_page_directory_entry *pd,
+ uint32_t start, uint32_t length)
+{
+ struct i915_page_table_entry *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_write_pde(pd, pde, pt);
+
+ /* Make sure write is complete before other code can use this page
+	 * table. Also required for WC mapped PTEs */
+ readl(dev_priv->gtt.gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.pd_offset & 0x3f);
- return (ppgtt->pd_offset / 64) << 16;
+ return (ppgtt->pd.pd_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -797,6 +977,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ return 0;
+}
+
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
{
@@ -908,21 +1098,21 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
- if (last_pte > I915_PPGTT_PT_ENTRIES)
- last_pte = I915_PPGTT_PT_ENTRIES;
+ if (last_pte > GEN6_PTES)
+ last_pte = GEN6_PTES;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -942,22 +1132,22 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
- if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ if (++act_pte == GEN6_PTES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
act_pt++;
@@ -968,26 +1158,134 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload.
+ * If we are switching between contexts with the same LRCA, we must also do a
+ * force restore.
+ */
+static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	/* Flag all rings so that the next mm switch forces a PD reload. */
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table_entry *pt)
{
+ gen6_pte_t *pt_vaddr, scratch_pte;
int i;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
+ WARN_ON(vm->scratch.addr == 0);
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC, true, 0);
+
+ pt_vaddr = kmap_atomic(pt->page);
+
+ for (i = 0; i < GEN6_PTES; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ DECLARE_BITMAP(new_page_tables, I915_PDES);
+ struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_table_entry *pt;
+ const uint32_t start_save = start, length_save = length;
+ uint32_t pde, temp;
+ int ret;
+
+ WARN_ON(upper_32_bits(start));
+
+ bitmap_zero(new_page_tables, I915_PDES);
+
+ /* The allocation is done in two stages so that we can bail out with
+ * minimal amount of pain. The first stage finds new page tables that
+	 * need allocation. The second stage marks the ptes in use within the page
+ * tables.
+ */
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ if (pt != ppgtt->scratch_pt) {
+ WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
+ continue;
+ }
+
+ /* We've already allocated a page table */
+ WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+
+ pt = alloc_pt_single(dev);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ gen6_initialize_pt(vm, pt);
+
+ ppgtt->pd.page_table[pde] = pt;
+ set_bit(pde, new_page_tables);
+ trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
+
+ start = start_save;
+ length = length_save;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
+
+ bitmap_zero(tmp_bitmap, GEN6_PTES);
+ bitmap_set(tmp_bitmap, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+
+ if (test_and_clear_bit(pde, new_page_tables))
+ gen6_write_pde(&ppgtt->pd, pde, pt);
+
+ trace_i915_page_table_entry_map(vm, pde, pt,
+ gen6_pte_index(start),
+ gen6_pte_count(start, length),
+ GEN6_PTES);
+ bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
+ GEN6_PTES);
+ }
+
+ WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
+
+ /* Make sure write is complete before other code can use this page
+	 * table. Also required for WC mapped PTEs */
+ readl(dev_priv->gtt.gsm);
+
+ mark_tlbs_dirty(ppgtt);
+ return 0;
+
+unwind_out:
+ for_each_set_bit(pde, new_page_tables, I915_PDES) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ unmap_and_free_pt(pt, vm->dev);
+ }
+
+ mark_tlbs_dirty(ppgtt);
+ return ret;
}
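gen6_alloc_va_range() is installed as the allocate_va_range hook further down (in gen6_ppgtt_init()). A hedged sketch of how a caller would use it, not a call site from this patch:

	/* Hypothetical caller: back a node with real page tables before use. */
	if (vm->allocate_va_range) {
		ret = vm->allocate_va_range(vm, node->start, node->size);
		if (ret)
			return ret;
	}
	vm->insert_entries(vm, pages, node->start, cache_level, flags);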
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
+
+ if (pt != ppgtt->scratch_pt)
+ unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+ }
+
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ unmap_and_free_pd(&ppgtt->pd);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -997,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
}
@@ -1013,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
+
+ gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
@@ -1026,88 +1329,43 @@ alloc:
0, dev_priv->gtt.base.total,
0);
if (ret)
- return ret;
+ goto err_out;
retried = true;
goto alloc;
}
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
-
- ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- return ret;
-}
-
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
- int i;
-
- ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
- GFP_KERNEL);
+ if (ret)
+ goto err_out;
- if (!ppgtt->pt_pages)
- return -ENOMEM;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i]) {
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
- }
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->num_pd_entries = I915_PDES;
return 0;
+
+err_out:
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ return ret;
}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
- int ret;
-
- ret = gen6_ppgtt_allocate_page_directories(ppgtt);
- if (ret)
- return ret;
-
- ret = gen6_ppgtt_allocate_page_tables(ppgtt);
- if (ret) {
- drm_mm_remove_node(&ppgtt->node);
- return ret;
- }
-
- ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr) {
- drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
-
- return 0;
+ return gen6_ppgtt_allocate_page_directories(ppgtt);
}
-static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
{
- struct drm_device *dev = ppgtt->base.dev;
- int i;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- gen6_ppgtt_unmap_pages(ppgtt);
- return -EIO;
- }
-
- ppgtt->pt_dma_addr[i] = pt_addr;
- }
+ struct i915_page_table_entry *unused;
+ uint32_t pde, temp;
- return 0;
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1123,40 +1381,57 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else
BUG();
+ if (intel_vgpu_active(dev))
+ ppgtt->switch_mm = vgpu_mm_switch;
+
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
- ret = gen6_ppgtt_setup_page_tables(ppgtt);
- if (ret) {
- gen6_ppgtt_free(ppgtt);
- return ret;
+ if (aliasing) {
+ /* preallocate all pts */
+ ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+ ppgtt->base.dev);
+
+ if (ret) {
+ gen6_ppgtt_cleanup(&ppgtt->base);
+ return ret;
+ }
}
+ ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+ ppgtt->pd.pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+
+ if (aliasing)
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ else
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
- gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd_offset << 10);
+ ppgtt->pd.pd_offset << 10);
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
+ bool aliasing)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1164,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- return gen6_ppgtt_init(ppgtt);
+ return gen6_ppgtt_init(ppgtt, aliasing);
else
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
@@ -1173,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(dev, ppgtt, false);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1420,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
- if (i915_is_ggtt(vm)) {
- if (dev_priv->mm.aliasing_ppgtt)
- gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
- continue;
- }
+ if (USES_PPGTT(dev)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt,
+ base);
+
+ if (i915_is_ggtt(vm))
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
- gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
+ gen6_write_page_range(dev_priv, &ppgtt->pd,
+ 0, ppgtt->base.total);
+ }
}
i915_ggtt_flush(dev_priv);
@@ -1447,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
@@ -1464,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen8_gtt_pte_t __iomem *gtt_entries =
- (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t __iomem *gtt_entries =
+ (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -1510,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen6_gtt_pte_t __iomem *gtt_entries =
- (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t __iomem *gtt_entries =
+ (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -1549,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1575,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1633,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ struct sg_table *pages = obj->pages;
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
+ if (i915_is_ggtt(vma->vm))
+ pages = vma->ggtt_view.pages;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1652,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+ vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1663,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->ggtt_view.pages,
+ appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -1753,6 +2036,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
+
+ if (intel_vgpu_active(dev)) {
+ ret = intel_vgt_balloon(dev);
+ if (ret)
+ return ret;
+ }
+
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
@@ -1772,9 +2065,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
}
- dev_priv->gtt.base.start = start;
- dev_priv->gtt.base.total = end - start;
-
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1793,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt);
- if (ret != 0)
+ ret = __hw_ppgtt_init(dev, ppgtt, true);
+ if (ret) {
+ kfree(ppgtt);
return ret;
+ }
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
@@ -1826,6 +2118,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
}
if (drm_mm_initialized(&vm->mm)) {
+ if (intel_vgpu_active(dev))
+ intel_vgt_deballoon();
+
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
@@ -2078,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
@@ -2123,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ret = ggtt_probe_common(dev, gtt_size);
@@ -2228,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view)
{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -2241,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
- vma->ggtt_view = *view;
if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
+ vma->ggtt_view = *ggtt_view;
+
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
@@ -2253,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
} else {
BUG_ON(!i915_is_ggtt(vm));
+ vma->ggtt_view = *ggtt_view;
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
}
@@ -2265,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(!view))
+ return ERR_PTR(-EINVAL);
+
+ vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+ if (IS_ERR(vma))
+ return vma;
+
if (!vma)
- vma = __i915_gem_vma_create(obj, vm, view);
+ vma = __i915_gem_vma_create(obj, ggtt, view);
return vma;
+
+}
+
+static void
+rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
+ struct sg_table *st)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+ struct scatterlist *sg = st->sgl;
+
+ st->nents = 0;
+
+ for (column = 0; column < width; column++) {
+ src_idx = width * (height - 1) + column;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /* We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+			 * All we need are the DMA addresses.
+ */
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = in[src_idx];
+ sg_dma_len(sg) = PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= width;
+ }
+ }
+}
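The loop walks the source column by column starting from the bottom row, which is exactly the index permutation a 90 degree rotation needs. A worked trace for width = 3, height = 2 (values are indices into in[]):

	column 0: src_idx = 3*(2-1)+0 = 3, then 0   -> emits in[3], in[0]
	column 1: src_idx = 4, then 1               -> emits in[4], in[1]
	column 2: src_idx = 5, then 2               -> emits in[5], in[2]

	/* output order: 3, 0, 4, 1, 5, 2 */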
+
+static struct sg_table *
+intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+ unsigned long size, pages, rot_pages;
+ struct sg_page_iter sg_iter;
+ unsigned long i;
+ dma_addr_t *page_addr_list;
+ struct sg_table *st;
+ unsigned int tile_pitch, tile_height;
+ unsigned int width_pages, height_pages;
+ int ret = -ENOMEM;
+
+ pages = obj->base.size / PAGE_SIZE;
+
+ /* Calculate tiling geometry. */
+ tile_height = intel_tile_height(dev, rot_info->pixel_format,
+ rot_info->fb_modifier);
+ tile_pitch = PAGE_SIZE / tile_height;
+ width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
+ height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
+ rot_pages = width_pages * height_pages;
+ size = rot_pages * PAGE_SIZE;
+
+ /* Allocate a temporary list of source pages for random access. */
+ page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ if (!page_addr_list)
+ return ERR_PTR(ret);
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ /* Populate source page list from the object. */
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
+ i++;
+ }
+
+ /* Rotate the pages. */
+ rotate_pages(page_addr_list, width_pages, height_pages, st);
+
+ DRM_DEBUG_KMS(
+ "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
+ size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+
+ drm_free_large(page_addr_list);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ drm_free_large(page_addr_list);
+
+ DRM_DEBUG_KMS(
+ "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
+ size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+ return ERR_PTR(ret);
}
-static inline
-int i915_get_vma_pages(struct i915_vma *vma)
+static inline int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
+ int ret = 0;
+
if (vma->ggtt_view.pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ vma->ggtt_view.pages =
+ intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
if (!vma->ggtt_view.pages) {
- DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
- return -EINVAL;
+ ret = -EINVAL;
+ } else if (IS_ERR(vma->ggtt_view.pages)) {
+ ret = PTR_ERR(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
- return 0;
+ return ret;
}
/**
@@ -2312,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret = i915_get_vma_pages(vma);
+ if (i915_is_ggtt(vma->vm)) {
+ int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
vma->bind_vma(vma, cache_level, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e377c7d27bd4..fc03c99317c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,13 +36,13 @@
struct drm_i915_file_private;
-typedef uint32_t gen6_gtt_pte_t;
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
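Spelling out the gen6 arithmetic behind these definitions (PAGE_SIZE assumed to be 4 KiB):

	GEN6_PTES    = PAGE_SIZE / sizeof(gen6_pte_t) = 4096 / 4    = 1024
	per-PDE span = 1024 PTEs x 4 KiB              = 4 MiB       => GEN6_PDE_SHIFT = 22
	GEN6_PD_SIZE = I915_PDES x PAGE_SIZE          = 512 x 4 KiB = 2 MiB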
@@ -88,9 +95,8 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
-#define GEN8_LEGACY_PDPS 4
-#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPES 4
+#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED
+};
+
+struct intel_rotation_info {
+ unsigned int height;
+ unsigned int pitch;
+ uint32_t pixel_format;
+ uint64_t fb_modifier;
};
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
struct sg_table *pages;
+
+ union {
+ struct intel_rotation_info rotation_info;
+ };
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
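A display-side user would fill the union before looking up a VMA, using the lookup helper added in i915_gem_gtt.c above. A hedged sketch with made-up values:

	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_ROTATED,
		.rotation_info = {
			.height       = 1080,       /* fb height, illustrative */
			.pitch        = 1920 * 4,   /* bytes per row, illustrative */
			.pixel_format = 0,          /* a DRM_FORMAT_* fourcc */
			.fb_modifier  = 0,          /* a DRM_FORMAT_MOD_* value */
		},
	};

	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, &view);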
enum i915_cache_level;
@@ -187,6 +206,28 @@ struct i915_vma {
u32 flags);
};
+struct i915_page_table_entry {
+ struct page *page;
+ dma_addr_t daddr;
+
+ unsigned long *used_ptes;
+};
+
+struct i915_page_directory_entry {
+ struct page *page; /* NULL for GEN6-GEN7 */
+ union {
+ uint32_t pd_offset;
+ dma_addr_t daddr;
+ };
+
+ struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer_entry {
+ /* struct page *page; */
+ struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+};
+
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
@@ -223,9 +264,12 @@ struct i915_address_space {
struct list_head inactive_list;
/* FIXME: Need a more generic return type */
- gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ gen6_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags); /* Create a valid PTE */
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -269,30 +313,90 @@ struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
+ unsigned long pd_dirty_rings;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
- struct page **pt_pages;
- struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
- };
- struct page *pd_pages;
- union {
- uint32_t pd_offset;
- dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
- };
- union {
- dma_addr_t *pt_dma_addr;
- dma_addr_t *gen8_pt_dma_addr[4];
+ struct i915_page_directory_pointer_entry pdp;
+ struct i915_page_directory_entry pd;
};
+ struct i915_page_table_entry *scratch_pt;
+
struct drm_i915_file_private *file_priv;
+ gen6_pte_t __iomem *pd_addr;
+
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
+/* Iterates over every pde between start and start + length. If start and
+ * start + length are not perfectly divisible, the macro rounds down and up
+ * as needed. The macro modifies pde, start, and length; temp is a scratch
+ * variable. On gen6/7, start = 0 and length = 2G effectively iterates over
+ * every PDE in the system.
+ *
+ * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen6_pde_index(start); \
+ pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+ temp = min_t(unsigned, temp, length), \
+ start += temp, length -= temp)
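Usage mirrors gen6_write_page_range() earlier in this patch: the macro hands back each page table together with its pde index while consuming start/length. A minimal sketch:

	struct i915_page_table_entry *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		/* pt == ppgtt->pd.page_table[pde]; start still points into
		 * this pde's range, length is the amount left to cover. */
		gen6_write_pde(&ppgtt->pd, pde, pt);
	}
	/* caller must flush the posted writes, e.g. readl(dev_priv->gtt.gsm) */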
+
+static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+{
+ const uint32_t mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/* Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
+ uint32_t pde_shift)
+{
+ const uint64_t mask = ~((1 << pde_shift) - 1);
+ uint64_t end;
+
+ WARN_ON(length == 0);
+ WARN_ON(offset_in_page(addr|length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
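A worked case on gen6 (GEN6_PDE_SHIFT = 22, so one pde covers 4 MiB, i.e. 1024 PTEs):

	/* Range [0x200000, 0x600000) starts halfway into pde 0 and crosses
	 * into pde 1, so the count is clamped at the pde boundary: */
	i915_pte_index(0x200000, 22);           /* (0x200000 >> 12) & 1023 = 512 */
	i915_pte_count(0x200000, 0x400000, 22); /* 1024 - 512 = 512, not 1024 */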
+
+static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline uint32_t gen6_pte_index(uint32_t addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline uint32_t gen6_pde_index(uint32_t addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -321,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+ const struct i915_ggtt_view *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+
+ return a->type == b->type;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 000000000000..f7929e769250
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @dev_priv: i915 device
+ * @target: amount of memory to make available, in pages
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that it's not guaranteed that the released amount is actually
+ * available as free system memory - the pages might still be in use due to
+ * other reasons (like cpu mmaps), or the mm core may have reused them before
+ * we could grab them. Therefore code that needs to explicitly shrink buffer
+ * object caches (e.g. to avoid deadlocks in memory reclaim) must fall back
+ * to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) results in the shrinker
+ * code having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
+{
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
+ unsigned long count = 0;
+
+ /*
+ * As we may completely rewrite the (un)bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at a time, and recheck the list
+ * on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equal to 0.
+ */
+ for (phase = phases; phase->list; phase++) {
+ struct list_head still_in_list;
+
+ if ((flags & phase->bit) == 0)
+ continue;
+
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(phase->list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
+
+ obj = list_first_entry(phase->list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (flags & I915_SHRINK_PURGEABLE &&
+ obj->madv != I915_MADV_DONTNEED)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, phase->list);
+ }
+
+ return count;
+}
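The two-pass policy built on top of this is easy to see in a short sketch: try to satisfy the target from purgeable pages first, then fall back to everything else. A minimal illustration using the flags above (the 128-page target is an arbitrary example), mirroring the policy in i915_gem_shrinker_scan() later in this file:

	/* Sketch: prefer purgeable pages, then fall back to a full
	 * bound+unbound pass if the (arbitrary) 128-page target was
	 * not met.
	 */
	unsigned long freed;

	freed = i915_gem_shrink(dev_priv, 128,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < 128)
		freed += i915_gem_shrink(dev_priv, 128 - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);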
+
+/**
+ * i915_gem_shrink_all - Shrink buffer object caches completely
+ * @dev_priv: i915 device
+ *
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
+ * caches completely. It additionally first waits for and retires all
+ * outstanding requests, so that backing storage for active objects can be
+ * released as well.
+ *
+ * This should only be used in code that intentionally quiesces the GPU or as a
+ * last-ditch effort when memory seems to have run out.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ i915_gem_evict_everything(dev_priv->dev);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+}
+
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
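The trylock above also tolerates recursion: direct reclaim may re-enter the driver while the current task already holds struct_mutex, in which case the shrinker runs without taking (or dropping) the lock. A minimal sketch of the same pattern, with hypothetical names:

	/* Sketch: take the lock if free, run locklessly if this task
	 * already owns it (recursion from direct reclaim), else bail.
	 */
	static bool example_shrinker_lock(struct mutex *lock, bool *unlock)
	{
		if (mutex_trylock(lock)) {
			*unlock = true;		/* we took it, we release it */
			return true;
		}
		if (!mutex_is_locked_by(lock, current))
			return false;		/* held by another task */
		*unlock = false;		/* re-entered under our own lock */
		return true;
	}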
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ if (obj->pages_pin_count == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long freed;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
+ if (freed < sc->nr_to_scan)
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return freed;
+}
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed_pages;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_shrinker_init - Initialize i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function registers and sets up the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+{
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
+}
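A matching teardown is not part of this patch; presumably it would mirror the init in reverse, along these lines (hypothetical sketch using the stock unregister helpers):

	/* Hypothetical cleanup mirroring i915_gem_shrinker_init(). */
	void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
	{
		WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
		unregister_shrinker(&dev_priv->mm.shrinker);
	}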
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9c6f93ec886b..f8da71682c96 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -231,7 +231,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.size = size / dev_priv->fbc.threshold;
+ dev_priv->fbc.uncompressed_size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->fbc.size)
+ if (size <= dev_priv->fbc.uncompressed_size)
return 0;
/* Release any current block */
@@ -266,7 +266,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbc.size == 0)
+ if (dev_priv->fbc.uncompressed_size == 0)
return;
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
@@ -276,7 +276,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
kfree(dev_priv->fbc.compressed_llb);
}
- dev_priv->fbc.size = 0;
+ dev_priv->fbc.uncompressed_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48ddbf44c862..1d4e60df8883 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
@@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore_obj);
+
+ for (i = 0; i < error->vm_count; i++)
+ kfree(error->active_bo[i]);
+
kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo);
+ kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -994,12 +1006,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
- if (request->file_priv) {
+ if (request->pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->file_priv->file->pid,
- PIDTYPE_PID);
+ task = pid_task(request->pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
@@ -1165,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ }
+
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ede5bbbd8a08..6d494432b19f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
+ dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
-
- dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
+
+ synchronize_irq(dev->irq);
}
/**
@@ -492,31 +491,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Locking is horribly broken here, but whatever. */
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return intel_crtc->active;
- } else {
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
- }
-}
-
/*
* This timing diagram depicts the video signal in and
* around the vertical blanking period.
@@ -582,34 +556,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config->base.adjusted_mode;
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- const struct drm_display_mode *mode =
- &intel_crtc->config->base.adjusted_mode;
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vbl_start = mode->crtc_vblank_start;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- } else {
- enum transcoder cpu_transcoder = (enum transcoder) pipe;
-
- htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
- hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
- vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
- if ((I915_READ(PIPECONF(cpu_transcoder)) &
- PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- }
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
/* Convert to pixel count */
vbl_start *= htotal;
@@ -648,12 +604,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
return I915_READ(reg);
}
@@ -840,7 +790,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return -EINVAL;
}
- if (!crtc->enabled) {
+ if (!crtc->state->enable) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
@@ -1046,129 +996,73 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
}
-static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *rps_ei)
+static void vlv_c0_read(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *ei)
{
- u32 cz_ts, cz_freq_khz;
- u32 render_count, media_count;
- u32 elapsed_render, elapsed_media, elapsed_time;
- u32 residency = 0;
-
- cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
- cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
-
- render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
- media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
-
- if (rps_ei->cz_clock == 0) {
- rps_ei->cz_clock = cz_ts;
- rps_ei->render_c0 = render_count;
- rps_ei->media_c0 = media_count;
-
- return dev_priv->rps.cur_freq;
- }
-
- elapsed_time = cz_ts - rps_ei->cz_clock;
- rps_ei->cz_clock = cz_ts;
+ ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
+ ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+}
- elapsed_render = render_count - rps_ei->render_c0;
- rps_ei->render_c0 = render_count;
+static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+ const struct intel_rps_ei *old,
+ const struct intel_rps_ei *now,
+ int threshold)
+{
+ u64 time, c0;
- elapsed_media = media_count - rps_ei->media_c0;
- rps_ei->media_c0 = media_count;
+ if (old->cz_clock == 0)
+ return false;
- /* Convert all the counters into common unit of milli sec */
- elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
- elapsed_render /= cz_freq_khz;
- elapsed_media /= cz_freq_khz;
+ time = now->cz_clock - old->cz_clock;
+ time *= threshold * dev_priv->mem_freq;
- /*
- * Calculate overall C0 residency percentage
- * only if elapsed time is non zero
+ /* Workload can be split between render + media, e.g. SwapBuffers
+ * being blitted in X after being rendered in mesa. To account for
+ * this we need to combine both engines into our activity counter.
*/
- if (elapsed_time) {
- residency =
- ((max(elapsed_render, elapsed_media) * 100)
- / elapsed_time);
- }
+ c0 = now->render_c0 - old->render_c0;
+ c0 += now->media_c0 - old->media_c0;
+ c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
- return residency;
+ return c0 >= time;
}
-/**
- * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
- * busy-ness calculated from C0 counters of render & media power wells
- * @dev_priv: DRM device private
- *
- */
-static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- u32 residency_C0_up = 0, residency_C0_down = 0;
- int new_delay, adj;
-
- dev_priv->rps.ei_interrupt_count++;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-
- if (dev_priv->rps.up_ei.cz_clock == 0) {
- vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
- vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
- return dev_priv->rps.cur_freq;
- }
+ vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+ dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+}
+static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ struct intel_rps_ei now;
+ u32 events = 0;
- /*
- * To down throttle, C0 residency should be less than down threshold
- * for continous EI intervals. So calculate down EI counters
- * once in VLV_INT_COUNT_FOR_DOWN_EI
- */
- if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+ if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ return 0;
- dev_priv->rps.ei_interrupt_count = 0;
+ vlv_c0_read(dev_priv, &now);
+ if (now.cz_clock == 0)
+ return 0;
- residency_C0_down = vlv_c0_residency(dev_priv,
- &dev_priv->rps.down_ei);
- } else {
- residency_C0_up = vlv_c0_residency(dev_priv,
- &dev_priv->rps.up_ei);
+ if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+ if (!vlv_c0_above(dev_priv,
+ &dev_priv->rps.down_ei, &now,
+ VLV_RP_DOWN_EI_THRESHOLD))
+ events |= GEN6_PM_RP_DOWN_THRESHOLD;
+ dev_priv->rps.down_ei = now;
}
- new_delay = dev_priv->rps.cur_freq;
-
- adj = dev_priv->rps.last_adj;
- /* C0 residency is greater than UP threshold. Increase Frequency */
- if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else
- adj = 1;
-
- if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
-
- /*
- * For better performance, jump directly
- * to RPe if we're below it.
- */
- if (new_delay < dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
-
- } else if (!dev_priv->rps.ei_interrupt_count &&
- (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
- if (adj < 0)
- adj *= 2;
- else
- adj = -1;
- /*
- * This means, C0 residency is less than down threshold over
- * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
- */
- if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
+ if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ if (vlv_c0_above(dev_priv,
+ &dev_priv->rps.up_ei, &now,
+ VLV_RP_UP_EI_THRESHOLD))
+ events |= GEN6_PM_RP_UP_THRESHOLD;
+ dev_priv->rps.up_ei = now;
}
- return new_delay;
+ return events;
}
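The new up/down test in vlv_c0_above() boils down to a cross-multiplied percentage comparison, avoiding integer division on the interrupt path (the real function additionally scales both sides by mem_freq and the CZ clock constants to bring counter and timestamp into a common unit). A minimal sketch of the idea, with hypothetical names:

	/* Sketch: "busy time is at least threshold percent of elapsed
	 * time", cross-multiplied so no precision is lost to division.
	 */
	static bool example_above(u64 busy, u64 elapsed, int threshold)
	{
		/* busy / elapsed >= threshold / 100 */
		return busy * 100 >= elapsed * threshold;
	}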
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1198,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1220,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
@@ -1243,10 +1137,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, new_delay);
- else
- gen6_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -1748,11 +1639,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- /* TODO: RPS on GEN9+ is not supported yet. */
- if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
- "GEN9+: unexpected RPS IRQ\n"))
- return;
-
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
@@ -2662,9 +2548,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -2684,9 +2567,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2699,9 +2579,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2715,9 +2592,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2769,9 +2643,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -3236,15 +3107,24 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
spin_lock_irq(&dev_priv->irq_lock);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ if (pipe_mask & 1 << PIPE_A)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
+ dev_priv->de_irq_mask[PIPE_A],
+ ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
+ if (pipe_mask & 1 << PIPE_B)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
+ dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+ if (pipe_mask & 1 << PIPE_C)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
+ dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3718,14 +3598,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);
I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);
@@ -3887,14 +3765,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4362,7 +4238,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4392,10 +4268,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- }
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 44f2262a5553..bb64415a1c3e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -27,7 +27,6 @@
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
- .powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
@@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = {
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
+ .load_detect_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named(powersave, i915.powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
@@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
+module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
+MODULE_PARM_DESC(load_detect_test,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
@@ -171,10 +172,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
- "Enable the MMIO debug code (default: false). This may negatively "
- "affect performance.");
+ "Enable the MMIO debug code for the first N failures (default: off). "
+ "This may negatively affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33b3d0a24071..b522eb6e59a4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,7 +139,21 @@
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_R_PWR_CLK_STATE 0x20C8
+#define GEN8_RPCS_ENABLE (1 << 31)
+#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
+#define GEN8_RPCS_S_CNT_SHIFT 15
+#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
+#define GEN8_RPCS_SS_CNT_SHIFT 8
+#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define GEN8_RPCS_EU_MAX_SHIFT 4
+#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define GEN8_RPCS_EU_MIN_SHIFT 0
+#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
#define GAM_ECOCHK 0x4090
+#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
@@ -552,6 +566,9 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */
+#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */
+#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
@@ -586,6 +603,19 @@ enum punit_power_well {
PUNIT_POWER_WELL_NUM,
};
+enum skl_disp_power_wells {
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_DDI_A_E,
+ SKL_DISP_PW_DDI_B,
+ SKL_DISP_PW_DDI_C,
+ SKL_DISP_PW_DDI_D,
+ SKL_DISP_PW_1 = 14,
+ SKL_DISP_PW_2,
+};
+
+#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
+#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
+
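For illustration, expanding the new macros for SKL_DISP_PW_2 (= 15) shows how each well occupies a (state, request) bit pair in the control register:

	/* SKL_DISP_PW_2 == 15:
	 *   SKL_POWER_WELL_STATE(15) == 1 << 30   (0x40000000)
	 *   SKL_POWER_WELL_REQ(15)   == 1 << 31   (0x80000000)
	 */
	u32 state = SKL_POWER_WELL_STATE(SKL_DISP_PW_2);
	u32 request = SKL_POWER_WELL_REQ(SKL_DISP_PW_2);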
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
@@ -614,6 +644,11 @@ enum punit_power_well {
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+#define PUNIT_REG_DDR_SETUP2 0x139
+#define FORCE_DDR_FREQ_REQ_ACK (1 << 8)
+#define FORCE_DDR_LOW_FREQ (1 << 1)
+#define FORCE_DDR_HIGH_FREQ (1 << 0)
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -638,7 +673,6 @@ enum punit_power_well {
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
-#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1002,6 +1036,7 @@ enum punit_power_well {
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
#define _CHV_PLL_DW6_CH0 0x8018
@@ -1011,6 +1046,19 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_PLL_DW8_CH0 0x8020
+#define _CHV_PLL_DW8_CH1 0x81A0
+#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
+#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0 0x8024
+#define _CHV_PLL_DW9_CH1 0x81A4
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse, 0: fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
@@ -1258,6 +1306,9 @@ enum punit_power_well {
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
+#define GEN8_FAULT_TLB_DATA0 0x04b10
+#define GEN8_FAULT_TLB_DATA1 0x04b14
+
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1314,6 +1365,8 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2))
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -1470,6 +1523,7 @@ enum punit_power_well {
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
@@ -1482,6 +1536,8 @@ enum punit_power_well {
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
@@ -1491,6 +1547,17 @@ enum punit_power_well {
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+#define GEN8_FUSE2 0x9120
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 0x9134
+#define GEN8_EU_DISABLE1 0x9138
+#define GEN8_EU_DISABLE2 0x913c
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -2048,6 +2115,14 @@ enum punit_power_well {
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
/*
@@ -2376,6 +2451,12 @@ enum punit_power_well {
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
+#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+ INTERVAL_1_33_US(us) : \
+ INTERVAL_1_28_US(us))
+
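The new interval helpers approximate division by the GT timestamp tick period with shifts: (us * 100) >> 7 divides by 1.28, and (us * 3) >> 2 divides by roughly 1.33. A quick worked check:

	/* 1280 us at a 1.28 us tick: (1280 * 100) >> 7 == 1000 ticks.
	 * 1333 us at a 1.33 us tick: (1333 * 3) >> 2 == 999 ticks.
	 */
	u32 ticks_pre_gen9 = INTERVAL_1_28_US(1280);	/* 1000 */
	u32 ticks_gen9 = INTERVAL_1_33_US(1333);	/* 999 */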
/*
* Logical Context regs
*/
@@ -2968,7 +3049,7 @@ enum punit_power_well {
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
-/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
@@ -3865,6 +3946,7 @@ enum punit_power_well {
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
@@ -4013,7 +4095,7 @@ enum punit_power_well {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB 0x70030
+#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -4021,6 +4103,9 @@ enum punit_power_well {
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
+#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
@@ -4044,8 +4129,8 @@ enum punit_power_well {
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_CURSORA_MASK (0x3f<<8)
-#define DSPFW_PLANEC_SHIFT_OLD 0
-#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
@@ -4084,25 +4169,25 @@ enum punit_power_well {
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
@@ -4111,7 +4196,7 @@ enum punit_power_well {
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (1<<24)
+#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
@@ -4132,7 +4217,7 @@ enum punit_power_well {
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (1<<24)
+#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
@@ -4153,21 +4238,17 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values */
-#define DRAIN_LATENCY_PRECISION_16 16
-#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_HIGH (1<<31)
-#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_HIGH (1<<7)
-#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1<<7)
+#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
+#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1<<31)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -5221,14 +5302,22 @@ enum punit_power_well {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define FF_SLICE_CS_CHICKEN2 0x02e4
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
-#define HIZ_CHICKEN 0x7018
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+#define HIZ_CHICKEN 0x7018
+# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
+#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -5245,11 +5334,16 @@ enum punit_power_well {
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define GEN8_L3SQCREG4 0xb118
+#define GEN8_LQSC_RO_PERF_DIS (1<<27)
+
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
+#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5258,6 +5352,9 @@ enum punit_power_well {
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define BDW_SCRATCH1 0xb11c
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -5980,6 +6077,7 @@ enum punit_power_well {
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
+#define EDRAM_ENABLED 0x1
#define GEN6_UCGCTL1 0x9400
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
@@ -6003,6 +6101,7 @@ enum punit_power_well {
#define GEN6_RSTCTL 0x9420
#define GEN8_UCGCTL6 0x9430
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_GFXPAUSE 0xA000
@@ -6010,6 +6109,7 @@ enum punit_power_well {
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -6028,8 +6128,10 @@ enum punit_power_well {
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
+#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -6120,8 +6222,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
-#define VLV_RENDER_C0_COUNT_REG 0x138118
-#define VLV_MEDIA_C0_COUNT_REG 0x13811C
+#define VLV_RENDER_C0_COUNT 0x138118
+#define VLV_MEDIA_C0_COUNT 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -6155,6 +6257,37 @@ enum punit_power_well {
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define CHV_POWER_SS0_SIG1 0xa720
+#define CHV_POWER_SS1_SIG1 0xa728
+#define CHV_SS_PG_ENABLE (1<<1)
+#define CHV_EU08_PG_ENABLE (1<<9)
+#define CHV_EU19_PG_ENABLE (1<<17)
+#define CHV_EU210_PG_ENABLE (1<<25)
+
+#define CHV_POWER_SS0_SIG2 0xa724
+#define CHV_POWER_SS1_SIG2 0xa72c
+#define CHV_EU311_PG_ENABLE (1<<1)
+
+#define GEN9_SLICE0_PGCTL_ACK 0x804c
+#define GEN9_SLICE1_PGCTL_ACK 0x8050
+#define GEN9_SLICE2_PGCTL_ACK 0x8054
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+
+#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
+#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
+#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
+#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
+#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
+#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
@@ -6185,6 +6318,7 @@ enum punit_power_well {
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
@@ -6200,8 +6334,12 @@ enum punit_power_well {
#define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define GEN9_HALF_SLICE_CHICKEN7 0xe194
+#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
+
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
@@ -6351,6 +6489,13 @@ enum punit_power_well {
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
+/* SKL Fuse Status */
+#define SKL_FUSE_STATUS 0x42000
+#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
+#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
+#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+
/* Per-pipe DDI Function Control */
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 9f19ed38cdc3..cf67f82f7b7f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,166 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- return I915_READ8(data_port);
-}
-
-static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- return I915_READ8(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- I915_WRITE8(VGA_AR_DATA_WRITE, val);
-}
-
-static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- I915_WRITE8(data_port, val);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
-
- /* VGA color palette registers */
- dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
-
- /* MSR bits */
- dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(dev, cr_index, cr_data, 0x11,
- i915_read_indexed(dev, cr_index, cr_data, 0x11) &
- (~0x80));
- for (i = 0; i <= 0x24; i++)
- dev_priv->regfile.saveCR[i] =
- i915_read_indexed(dev, cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- dev_priv->regfile.saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- I915_READ8(st01);
- dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
- I915_READ8(st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- dev_priv->regfile.saveGR[i] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
-
- dev_priv->regfile.saveGR[0x10] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->regfile.saveGR[0x11] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->regfile.saveGR[0x18] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveSR[i] =
- i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
-
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- /* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->regfile.saveSR[i]);
-
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->regfile.saveGR[i]);
-
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->regfile.saveGR[0x10]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->regfile.saveGR[0x11]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->regfile.saveGR[0x18]);
-
- /* Attribute controller registers */
- I915_READ8(st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
- I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
- I915_READ8(st01);
-
- /* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
- /* This is only meaningful in non-KMS mode */
- /* Don't regfile.save them in KMS mode */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_display_reg(dev);
-
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_display_reg(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- mask = ~LVDS_PORT_EN;
+ mask = ~LVDS_PORT_EN;
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_vga(dev);
- else
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDEIER = I915_READ(DEIER);
- dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
- dev_priv->regfile.saveGTIER = I915_READ(GTIER);
- dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
- dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->regfile.saveIER = I915_READ(IER);
- dev_priv->regfile.saveIMR = I915_READ(IMR);
- }
- }
-
if (IS_GEN4(dev))
pci_read_config_word(dev->pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
- I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
- I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
- I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else {
- I915_WRITE(IER, dev_priv->regfile.saveIER);
- I915_WRITE(IMR, dev_priv->regfile.saveIMR);
- }
- }
-
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 49f5ade0edb7..247626885f49 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_get_drvdata(kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
+
+static struct attribute *media_rc6_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = media_rc6_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -300,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_GEN9(dev_priv))
+ ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -402,10 +423,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,10 +482,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -493,38 +508,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap;
- ssize_t ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ u32 val;
- if (attr == &dev_attr_gt_RP0_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x0000ff) >> 0));
- } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x00ff00) >> 8));
- } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0xff0000) >> 16));
- } else {
+ if (attr == &dev_attr_gt_RP0_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else if (attr == &dev_attr_gt_RP1_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else if (attr == &dev_attr_gt_RPn_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
BUG();
- }
+
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -633,6 +627,12 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("RC6p residency sysfs setup failed\n");
}
+ if (IS_VALLEYVIEW(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &media_rc6_attr_group);
+ if (ret)
+ DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
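With the group merged, VLV systems expose the new counter next to the existing RC6 files under the device's power directory. A minimal userspace sketch that reads it back (the card0 path is an assumption; the DRM minor number depends on the system):

#include <stdio.h>

/* Read a single RC6 residency counter, in milliseconds, from sysfs.
 * The path assumes card0; adjust for the actual DRM minor. */
static long read_residency_ms(const char *name)
{
	char path[256];
	FILE *f;
	long ms = -1;

	snprintf(path, sizeof(path),
		 "/sys/class/drm/card0/power/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &ms) != 1)
		ms = -1;
	fclose(f);
	return ms;
}

int main(void)
{
	printf("rc6: %ld ms, media rc6: %ld ms\n",
	       read_residency_ms("rc6_residency_ms"),
	       read_residency_ms("media_rc6_residency_ms"));
	return 0;
}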
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index d776621c8521..5fda6c70b423 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -114,7 +114,7 @@ TRACE_EVENT(i915_vma_bind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
__field(unsigned, flags)
),
@@ -127,7 +127,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags;
),
- TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
@@ -140,7 +140,7 @@ TRACE_EVENT(i915_vma_unbind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
),
@@ -151,10 +151,109 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
+#define VM_TO_TRACE_NAME(vm) \
+ (i915_is_ggtt(vm) ? "G" : \
+ "P")
+
+DECLARE_EVENT_CLASS(i915_va,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, end)
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->start = start;
+ __entry->end = start + length - 1;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("vm=%p (%s), 0x%llx-0x%llx",
+ __entry->vm, __get_str(name), __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_va, i915_va_alloc,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name)
+);
+
+DECLARE_EVENT_CLASS(i915_page_table_entry,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u64, start)
+ __field(u64, end)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->start = start;
+ __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
+ ),
+
+ TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift)
+);
+
+/* Avoid extra math because we only support two sizes. The format is defined by
+ * bitmap_scnprintf: each 32 bits is 8 hex digits followed by a comma. */
+#define TRACE_PT_SIZE(bits) \
+ ((((bits) == 1024) ? 288 : 144) + 1)
+
+DECLARE_EVENT_CLASS(i915_page_table_entry_update,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u32, first)
+ __field(u32, last)
+ __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->first = first;
+ __entry->last = first + count - 1;
+ scnprintf(__get_str(cur_ptes),
+ TRACE_PT_SIZE(bits),
+ "%*pb",
+ bits,
+ pt->used_ptes);
+ ),
+
+ TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
+ __entry->vm, __entry->pde, __entry->last, __entry->first,
+ __get_str(cur_ptes))
+);
+
+DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits)
+);
+
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
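The TRACE_PT_SIZE bound above is pure arithmetic: the %*pb bitmap format emits one comma-separated group of 8 hex digits per 32 bits, so a 1024-bit PTE bitmap needs 1024/32 * 9 = 288 characters and a 512-bit one needs 144, plus a NUL terminator. A compile-time sketch of the same derivation (the generic PT_BUF_SIZE formula is an illustration, not part of the patch):

#include <assert.h>	/* static_assert (C11) */

/* One "%08x," group per 32 bits: 8 hex digits + 1 comma = 9 chars. */
#define PT_BITMAP_CHARS(bits)	(((bits) / 32) * 9)
#define PT_BUF_SIZE(bits)	(PT_BITMAP_CHARS(bits) + 1)	/* + NUL */

static_assert(PT_BUF_SIZE(1024) == 289, "matches TRACE_PT_SIZE(1024)");
static_assert(PT_BUF_SIZE(512)  == 145, "matches TRACE_PT_SIZE(512)");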
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
deleted file mode 100644
index d10fe3e9c49f..000000000000
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- * Copyright 2013 (c) Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "intel_drv.h"
-#include "i915_reg.h"
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
-void i915_save_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
- }
- /* FIXME: regfile.save TV & SDVO state */
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- }
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_read_config_byte(dev->pdev, PCI_LBPC,
- &dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- } else {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- }
-
- return;
-}
-
-void i915_restore_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_write_config_byte(dev->pdev, PCI_LBPC,
- dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
- * otherwise we get blank eDP screen after S3 on some machines
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
- } else {
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
- }
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
- }
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
-
- return;
-}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
new file mode 100644
index 000000000000..5eee75bff170
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_drv.h"
+#include "i915_vgpu.h"
+
+/**
+ * DOC: Intel GVT-g guest support
+ *
+ * Intel GVT-g is a graphics virtualization technology that shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented with a virtual GPU (vGPU) that has the
+ * same features as the underlying physical GPU (pGPU), so the i915
+ * driver can run seamlessly in a virtual machine. This file provides
+ * vGPU-specific optimizations when running in a virtual machine, to
+ * reduce the complexity of vGPU emulation and to improve overall
+ * performance.
+ *
+ * The primary technique introduced here is "address space ballooning".
+ * Intel GVT-g partitions global graphics memory among multiple VMs, so
+ * each VM can directly access a portion of the memory without the
+ * hypervisor's intervention, e.g. when filling textures or queuing
+ * commands. However, with this partitioning an unmodified i915 driver
+ * would assume a smaller graphics memory starting at address ZERO,
+ * which would require the vGPU emulation module to translate graphics
+ * addresses between 'guest view' and 'host view' for every register and
+ * command opcode that contains a graphics memory address. To avoid that
+ * complexity, Intel GVT-g instead tells each guest i915 driver the exact
+ * partitioning layout, and the driver reserves the portions not allocated
+ * to it so they are never handed out. The vGPU emulation module then only
+ * needs to scan and validate graphics addresses, without any address
+ * translation.
+ *
+ */
+
+/**
+ * i915_check_vgpu - detect virtual GPU
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage to detect whether
+ * the driver is running on a vGPU.
+ */
+void i915_check_vgpu(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint64_t magic;
+ uint32_t version;
+
+ BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ magic = readq(dev_priv->regs + vgtif_reg(magic));
+ if (magic != VGT_MAGIC)
+ return;
+
+ version = INTEL_VGT_IF_VERSION_ENCODE(
+ readw(dev_priv->regs + vgtif_reg(version_major)),
+ readw(dev_priv->regs + vgtif_reg(version_minor)));
+ if (version != INTEL_VGT_IF_VERSION) {
+ DRM_INFO("VGT interface version mismatch!\n");
+ return;
+ }
+
+ dev_priv->vgpu.active = true;
+ DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+}
+
+struct _balloon_info_ {
+ /*
+	 * Up to 2 regions in each of the mappable and unmappable graphics
+	 * memory may be ballooned. Index 0/1 is for mappable graphics
+	 * memory, 2/3 for unmappable graphics memory.
+ */
+ struct drm_mm_node space[4];
+};
+
+static struct _balloon_info_ bl_info;
+
+/**
+ * intel_vgt_deballoon - deballoon reserved graphics address chunks
+ *
+ * This function is called to deallocate the ballooned-out graphics memory
+ * when the driver is unloaded or when ballooning fails.
+ */
+void intel_vgt_deballoon(void)
+{
+ int i;
+
+ DRM_DEBUG("VGT deballoon.\n");
+
+ for (i = 0; i < 4; i++) {
+ if (bl_info.space[i].allocated)
+ drm_mm_remove_node(&bl_info.space[i]);
+ }
+
+ memset(&bl_info, 0, sizeof(bl_info));
+}
+
+static int vgt_balloon_space(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long start, unsigned long end)
+{
+ unsigned long size = end - start;
+
+ if (start == end)
+ return -EINVAL;
+
+ DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ start, end, size / 1024);
+
+ node->start = start;
+ node->size = size;
+
+ return drm_mm_reserve_node(mm, node);
+}
+
+/**
+ * intel_vgt_balloon - balloon out reserved graphics address chunks
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage to balloon out the
+ * graphics address space allocated to other vGPUs, by marking these ranges
+ * as reserved. The ballooning parameters (starting address and size of the
+ * mappable/unmappable graphics memory) are described by the vgt_if structure
+ * in a reserved MMIO range.
+ *
+ * As an example, the drawing below depicts a typical scenario after
+ * ballooning. Here vGPU1 has two ranges ballooned out, one in the mappable
+ * and one in the unmappable part. From vGPU1's point of view, the total
+ * size is the same as the physical one, with the start address of its
+ * graphics space being zero; yet some portions are ballooned out (the
+ * shaded parts, which are marked as reserved by the drm allocator). From
+ * the host's point of view, the graphics address space is partitioned
+ * among the vGPUs of the different VMs.
+ *
+ * vGPU1 view Host view
+ * 0 ------> +-----------+ +-----------+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * mappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * v |///////////| | Host |
+ * +=======+===========+ +===========+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * unmappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * | |///////////| | Host |
+ * v |///////////| | |
+ * total GM size ------> +-----------+ +-----------+
+ *
+ * Returns:
+ * zero on success, non-zero if the configuration is invalid or ballooning failed
+ */
+int intel_vgt_balloon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+
+ unsigned long mappable_base, mappable_size, mappable_end;
+ unsigned long unmappable_base, unmappable_size, unmappable_end;
+ int ret;
+
+ mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+
+ mappable_end = mappable_base + mappable_size;
+ unmappable_end = unmappable_base + unmappable_size;
+
+ DRM_INFO("VGT ballooning configuration:\n");
+ DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ mappable_base, mappable_size / 1024);
+ DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ unmappable_base, unmappable_size / 1024);
+
+ if (mappable_base < ggtt_vm->start ||
+ mappable_end > dev_priv->gtt.mappable_end ||
+ unmappable_base < dev_priv->gtt.mappable_end ||
+ unmappable_end > ggtt_vm_end) {
+ DRM_ERROR("Invalid ballooning configuration!\n");
+ return -EINVAL;
+ }
+
+ /* Unmappable graphic memory ballooning */
+ if (unmappable_base > dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[2],
+ dev_priv->gtt.mappable_end,
+ unmappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * No need to partition out the last physical page,
+	 * because it is reserved for the guard page.
+ */
+ if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[3],
+ unmappable_end,
+ ggtt_vm_end - PAGE_SIZE);
+ if (ret)
+ goto err;
+ }
+
+ /* Mappable graphic memory ballooning */
+ if (mappable_base > ggtt_vm->start) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[0],
+ ggtt_vm->start, mappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ if (mappable_end < dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[1],
+ mappable_end,
+ dev_priv->gtt.mappable_end);
+
+ if (ret)
+ goto err;
+ }
+
+ DRM_INFO("VGT balloon successfully\n");
+ return 0;
+
+err:
+ DRM_ERROR("VGT balloon fail\n");
+ intel_vgt_deballoon();
+ return ret;
+}
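Taken together, the file exports a detect/balloon/deballoon triple. The call sites land elsewhere in this series (per the diffstat, i915_dma.c and i915_gem_gtt.c are also touched), so the placement below is a sketch of the intended guest-side flow, not code from this patch:

#include "i915_drv.h"
#include "i915_vgpu.h"

/* Hypothetical init-path excerpt: detect the vGPU early, then balloon
 * out the foreign ranges while setting up the global GTT allocator. */
static int example_ggtt_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	i915_check_vgpu(dev);	/* sets dev_priv->vgpu.active on a vGPU */

	/* ... initialize dev_priv->gtt.base and its drm_mm as usual ... */

	if (dev_priv->vgpu.active) {
		ret = intel_vgt_balloon(dev);
		if (ret)	/* invalid layout from the host: bail out */
			return ret;
	}
	return 0;
}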
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
new file mode 100644
index 000000000000..97a88b5f6a26
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_VGPU_H_
+#define _I915_VGPU_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE 0x78000
+#define VGT_PVINFO_SIZE 0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization (one page for now).
+ */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+ INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
+struct vgt_if {
+ uint64_t magic; /* VGT_MAGIC */
+ uint16_t version_major;
+ uint16_t version_minor;
+ uint32_t vgt_id; /* ID of vGT instance */
+ uint32_t rsv1[12]; /* pad to offset 0x40 */
+ /*
+	 * Data structure describing the ballooning info of resources.
+	 * Each VM can have only one contiguous region per resource for now.
+	 * (Scattered resources may be supported in the future.)
+ * (starting from offset 0x40)
+ */
+ struct {
+		/* Aperture register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } mappable_gmadr; /* aperture */
+		/* GMADR register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } nonmappable_gmadr; /* non aperture */
+ /* allowed fence registers */
+ uint32_t fence_num;
+ uint32_t rsv2[3];
+ } avail_rs; /* available/assigned resource */
+ uint32_t rsv3[0x200 - 24]; /* pad to half page */
+ /*
+	 * The bottom half page is for responses from the Gfx driver to the
+	 * hypervisor. These fields are reserved for now.
+ */
+ uint32_t rsv4;
+ uint32_t display_ready; /* ready for display owner switch */
+ uint32_t rsv5[0x200 - 2]; /* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
+
+extern void i915_check_vgpu(struct drm_device *dev);
+extern int intel_vgt_balloon(struct drm_device *dev);
+extern void intel_vgt_deballoon(void);
+
+#endif /* _I915_VGPU_H_ */
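vgtif_reg() above is the classic null-pointer offsetof idiom: it resolves at compile time to the MMIO offset of a vgt_if field within the PVINFO page. A standalone equivalent using the standard macro (the struct here is a trimmed stand-in for the full layout above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VGT_PVINFO_PAGE 0x78000

struct vgt_if_demo {
	uint64_t magic;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t vgt_id;
};

/* Equivalent to the driver's vgtif_reg(x), written with offsetof(). */
#define vgtif_reg_demo(x) (VGT_PVINFO_PAGE + offsetof(struct vgt_if_demo, x))

int main(void)
{
	/* magic sits at the very start of the PVINFO page (0x78000). */
	printf("magic @ 0x%zx, vgt_id @ 0x%zx\n",
	       (size_t)vgtif_reg_demo(magic), (size_t)vgtif_reg_demo(vgt_id));
	return 0;
}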
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 19a9dd5408f3..3903b90fb64e 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev,
* FIXME: The proper sequence here will eventually be:
*
* drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, state);
* drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, state);
* drm_atomic_helper_wait_for_vblanks(dev, state);
* drm_atomic_helper_cleanup_planes(dev, state);
* drm_atomic_state_free(state);
@@ -214,12 +214,18 @@ struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *crtc_state;
if (WARN_ON(!intel_crtc->config))
- return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ else
+ crtc_state = kmemdup(intel_crtc->config,
+ sizeof(*intel_crtc->config), GFP_KERNEL);
- return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
- GFP_KERNEL);
+ if (crtc_state)
+ crtc_state->base.crtc = crtc;
+
+ return &crtc_state->base;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 9e6f727dfd19..976b89156570 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -203,16 +203,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- *val = state->rotation;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
/**
@@ -233,14 +225,6 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- state->rotation = val;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3f178258d9f9..c684085cb56a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp_link_params->vswing);
break;
}
+
+ if (bdb->version >= 173) {
+ uint8_t vswing;
+
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ dev_priv->vbt.edp_low_vswing = vswing == 0;
+ }
}
static void
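The new edp_vswing_preemph word packs one 4-bit vswing/pre-emphasis selector per panel, so panel_type indexes a nibble and value 0 selects the low-vswing eDP 1.4 table. A self-contained sketch of the extraction (the sample VBT value is made up):

#include <stdint.h>
#include <stdio.h>

/* Each panel gets a 4-bit entry in the 64-bit edp_vswing_preemph word;
 * 0 selects the low-vswing eDP 1.4 translation table. */
static unsigned int edp_vswing_for_panel(uint64_t edp_vswing_preemph,
					 unsigned int panel_type)
{
	return (edp_vswing_preemph >> (panel_type * 4)) & 0xF;
}

int main(void)
{
	uint64_t sample = 0x0000000000000102ULL;	/* made-up VBT value */

	/* panel 0 -> 0x2, panel 1 -> 0x0, panel 2 -> 0x1 */
	for (unsigned int panel = 0; panel < 3; panel++)
		printf("panel %u: vswing %u, low_vswing=%d\n", panel,
		       edp_vswing_for_panel(sample, panel),
		       edp_vswing_for_panel(sample, panel) == 0);
	return 0;
}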
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index a6a8710f665f..6afd5be33367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -554,6 +554,7 @@ struct bdb_edp {
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
+ u64 edp_vswing_preemph; /* v173 */
} __packed;
struct psr_table {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e66e17af0a56..515d7123785d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else
+ else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp);
+ else
+ status = connector_status_unknown;
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
@@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_get_property = intel_connector_atomic_get_property,
};
@@ -848,7 +851,7 @@ void intel_crt_init(struct drm_device *dev)
if (!crt)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(crt);
return;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f14e8a2a022d..3eb0efc2dd0d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -139,18 +139,24 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00004014, 0x00000087 },
};
+/* eDP 1.4 low vswing translation parameters */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000a8 },
+ { 0x00002016, 0x000000ab },
+ { 0x00006012, 0x000000a2 },
+ { 0x00008010, 0x00000088 },
+ { 0x00000018, 0x000000ab },
+ { 0x00004014, 0x000000a2 },
+ { 0x00006012, 0x000000a6 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00005013, 0x0000009c },
+ { 0x00000018, 0x00000088 },
+};
+
+
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
- { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
- { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
- { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
- { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
- { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
- { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
- { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
- { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
- { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
- { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+ { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -187,7 +193,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- int i, n_hdmi_entries, hdmi_800mV_0dB;
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,60 +205,85 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
- ddi_translations_edp = skl_ddi_translations_dp;
+ n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ if (dev_priv->vbt.edp_low_vswing) {
+ ddi_translations_edp = skl_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = skl_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+
+ /*
+ * On SKL, the recommendation from the hw team is to always use
+ * a certain type of level shifter (and thus the corresponding
+ * 800mV+2dB entry). Given that's the only validated entry, we
+ * override what is in the VBT, at least until further notice.
+ */
+ hdmi_level = 0;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 0;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+ n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 6;
+ hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
}
switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
+ size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
case PORT_D:
- if (intel_dp_is_edp(dev, PORT_D))
+ if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
- else
+ size = n_edp_entries;
+ } else {
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
+ }
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
default:
BUG();
}
- for (i = 0, reg = DDI_BUF_TRANS(port);
- i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
@@ -261,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_800mV_0dB;
+ hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
@@ -460,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
}
static struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *intel_encoder, *ret = NULL;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_encoder *ret = NULL;
+ struct drm_atomic_state *state;
int num_encoders = 0;
+ int i;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc == crtc) {
- ret = intel_encoder;
- num_encoders++;
- }
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i] ||
+ state->connector_states[i]->crtc != crtc_state->base.crtc)
+ continue;
+
+ ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ num_encoders++;
}
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
@@ -752,9 +790,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
+ case DPLL_CRTL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
+ case DPLL_CRTL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
@@ -1175,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(intel_crtc);
+ intel_ddi_get_crtc_new_encoder(crtc_state);
int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
@@ -2153,7 +2200,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
@@ -2172,7 +2219,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
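One detail worth calling out from the buffer-programming loop above: each translation entry is a pair of consecutive dwords, so entry i of a port's table lives at DDI_BUF_TRANS(port) + 8*i (trans1) and 8*i + 4 (trans2), which is why the loop advances reg by 4 after each write. A standalone sketch of the address math (the base offset is a placeholder, not the real register definition):

#include <stdio.h>

#define DDI_BUF_TRANS_BASE	0x64e00	/* placeholder port base */

int main(void)
{
	/* Entry i occupies two dwords: trans1 at +8*i, trans2 at +8*i+4. */
	for (unsigned int i = 0; i < 3; i++)
		printf("entry %u: trans1 @ 0x%x, trans2 @ 0x%x\n", i,
		       DDI_BUF_TRANS_BASE + 8 * i,
		       DDI_BUF_TRANS_BASE + 8 * i + 4);
	return 0;
}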
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f75173c20f47..d547d9c8dda2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+ int x, int y, struct drm_framebuffer *old_fb,
+ struct drm_atomic_state *state);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -391,7 +392,7 @@ static const intel_limit_t intel_limits_chv = {
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4860000, .max = 6700000 },
+ .vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
-static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
+ int type)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
+ int i, num_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
- for_each_intel_encoder(dev, encoder)
- if (encoder->new_crtc == crtc && encoder->type == type)
+ num_connectors++;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (encoder->type == type)
return true;
+ }
+
+ WARN_ON(num_connectors == 0);
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
- int refclk)
+static const intel_limit_t *
+intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc, refclk);
+ limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc);
+ limit = intel_g4x_limit(crtc_state);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
@@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
return found;
}
+/*
+ * Check whether the calculated PLL configuration is better than the best
+ * one found so far. Returns true if it is, and stores the error of the
+ * calculated configuration in *error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const intel_clock_t *calculated_clock,
+ const intel_clock_t *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (WARN_ON_ONCE(!target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * If the error is already small, prefer a bigger P value over a slightly
+ * smaller error. Report an error of 0 so that later candidates keep
+ * honouring this preference.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
+
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
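For reference, the metric vlv_PLL_is_optimal() computes above is the deviation
of the achieved dot clock from the target in parts per million, with two
tie-breakers: below 100 ppm a bigger P always wins, and a new candidate must
beat the best error by more than 10 ppm. A standalone sketch with made-up
numbers (plain C, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* hypothetical target and achieved dot clocks, in kHz */
		long long target = 148500, dot = 148350;
		unsigned int error_ppm =
			1000000LL * llabs(target - dot) / target;

		printf("error = %u ppm\n", error_ppm);	/* prints 1010 */
		return 0;
	}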
@@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm, diff;
+ unsigned int ppm;
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
@@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
&clock))
continue;
- diff = abs(clock.dot - target);
- ppm = div_u64(1000000ULL * diff, target);
-
- if (ppm < 100 && clock.p > best_clock->p) {
- bestppm = 0;
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
- if (bestppm >= 10 && ppm < bestppm - 10) {
- bestppm = ppm;
- *best_clock = clock;
- found = true;
- }
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
}
}
}
@@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
intel_clock_t clock;
uint64_t m2;
bool found = false;
memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
/*
* Based on hardware doc, the n always set to 1, and m1 always
@@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
clock.p = clock.p1 * clock.p2;
@@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
- /* based on hardware requirement, prefer bigger p
- */
- if (clock.p > best_clock->p) {
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
}
}
@@ -897,8 +965,12 @@ bool intel_crtc_active(struct drm_crtc *crtc)
*
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
*/
- return intel_crtc->active && crtc->primary->fb &&
+ return intel_crtc->active && crtc->primary->state->fb &&
intel_crtc->config->base.adjusted_mode.crtc_clock;
}
@@ -1301,14 +1373,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
u32 val;
if (INTEL_INFO(dev)->gen >= 9) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
I915_STATE_WARN(val & SP_ENABLE,
@@ -2190,30 +2262,109 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-int
-intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier)
+{
+ unsigned int tile_height;
+ uint32_t pixel_bytes;
+
+ switch (fb_format_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ tile_height = 1;
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ tile_height = IS_GEN2(dev) ? 16 : 8;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ tile_height = 32;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+ switch (pixel_bytes) {
+ default:
+ case 1:
+ tile_height = 64;
+ break;
+ case 2:
+ case 4:
+ tile_height = 32;
+ break;
+ case 8:
+ tile_height = 16;
+ break;
+ case 16:
+ WARN_ONCE(1,
+ "128-bit pixels are not supported for display!");
+ tile_height = 16;
+ break;
+ }
+ break;
+ default:
+ MISSING_CASE(fb_format_modifier);
+ tile_height = 1;
+ break;
+ }
+
+ return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+ uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+ return ALIGN(height, intel_tile_height(dev, pixel_format,
+ fb_format_modifier));
+}
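A quick sanity check of the new helpers (standalone sketch; tile heights taken
from intel_tile_height() above, and ALIGN() assumed to be the kernel's usual
power-of-two round-up):

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		printf("%d\n", ALIGN(1080, 32)); /* Y tiled: 1088 lines */
		printf("%d\n", ALIGN(1080, 8));  /* X tiled, gen3+: 1080 */
		printf("%d\n", ALIGN(1080, 1));  /* linear: unchanged, 1080 */
		return 0;
	}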
+
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
- int tile_height;
+ struct intel_rotation_info *info = &view->rotation_info;
- tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
- return ALIGN(height, tile_height);
+ *view = i915_ggtt_view_normal;
+
+ if (!plane_state)
+ return 0;
+
+ if (!intel_rotation_90_or_270(plane_state->rotation))
+ return 0;
+
+ *view = i915_ggtt_view_rotated;
+
+ info->height = fb->height;
+ info->pixel_format = fb->pixel_format;
+ info->pitch = fb->pitches[0];
+ info->fb_modifier = fb->modifier[0];
+
+ if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
+ info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
+ DRM_DEBUG_KMS(
+ "Y or Yf tiling is needed for 90/270 rotation!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
u32 alignment;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2223,7 +2374,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
else
alignment = 64 * 1024;
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
@@ -2231,13 +2382,22 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
alignment = 0;
}
break;
- case I915_TILING_Y:
- WARN(1, "Y tiled bo slipped through, driver bug!\n");
- return -EINVAL;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+ "Y tiling bo slipped through, driver bug!\n"))
+ return -EINVAL;
+ alignment = 1 * 1024 * 1024;
+ break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
+ return -EINVAL;
}
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ if (ret)
+ return ret;
+
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
@@ -2256,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
intel_runtime_pm_get(dev_priv);
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+ &view);
if (ret)
goto err_interruptible;
@@ -2276,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
return 0;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return ret;
}
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
+ int ret;
+
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ WARN_ONCE(ret, "Couldn't get view from plane state!");
+
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2366,12 +2535,13 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
static bool
-intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
@@ -2390,25 +2560,24 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
- obj->stride = crtc->base.primary->fb->pitches[0];
+ obj->stride = fb->pitches[0];
- mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
- mode_cmd.width = crtc->base.primary->fb->width;
- mode_cmd.height = crtc->base.primary->fb->height;
- mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+ mode_cmd.pixel_format = fb->pixel_format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
mutex_lock(&dev->struct_mutex);
-
- if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+ if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
-
- obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
out_unref_obj:
@@ -2421,35 +2590,37 @@ out_unref_obj:
static void
update_state_fb(struct drm_plane *plane)
{
- if (plane->fb != plane->state->fb)
- drm_atomic_set_fb_for_plane(plane->state, plane->fb);
+ if (plane->fb == plane->state->fb)
+ return;
+
+ if (plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+ plane->state->fb = plane->fb;
+ if (plane->state->fb)
+ drm_framebuffer_reference(plane->state->fb);
}
static void
-intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_initial_plane_config *plane_config)
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
+ struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_framebuffer *fb;
- if (!intel_crtc->base.primary->fb)
+ if (!plane_config->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- update_state_fb(primary);
-
- return;
+ if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ goto valid_fb;
}
- kfree(intel_crtc->base.primary->fb);
- intel_crtc->base.primary->fb = NULL;
+ kfree(plane_config->fb);
/*
* Failed to alloc the obj, check to see if we should share
@@ -2464,26 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!i->active)
continue;
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
+ fb = c->primary->fb;
+ if (!fb)
continue;
+ obj = intel_fb_obj(fb);
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dev_priv->preserve_bios_swizzle = true;
-
- drm_framebuffer_reference(c->primary->fb);
- primary->fb = c->primary->fb;
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
- break;
+ drm_framebuffer_reference(fb);
+ goto valid_fb;
}
}
- update_state_fb(intel_crtc->base.primary);
+ return;
+
+valid_fb:
+ obj = intel_fb_obj(fb);
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
+ primary->fb = fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2604,9 +2778,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
@@ -2708,9 +2879,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -2723,6 +2891,51 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+ /*
+ * The stride is expressed either as a multiple of 64-byte chunks for
+ * linear buffers or as a number of tiles for tiled buffers.
+ */
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ return 64;
+ case I915_FORMAT_MOD_X_TILED:
+ if (INTEL_INFO(dev)->gen == 2)
+ return 128;
+ return 512;
+ case I915_FORMAT_MOD_Y_TILED:
+ /* No need to check for old gens and Y tiling since this is
+ * about the display engine and those will be blocked before
+ * we get here.
+ */
+ return 128;
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (bits_per_pixel == 8)
+ return 64;
+ else
+ return 128;
+ default:
+ MISSING_CASE(fb_modifier);
+ return 64;
+ }
+}
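Worked example for the divider (hypothetical framebuffer, not from this diff):
a 4096-wide XRGB8888 surface has a byte pitch of 4096 * 4 = 16384, and
skylake_update_primary_plane() below programs PLANE_STRIDE as
fb->pitches[0] / stride_div, giving:

	16384 / 512 = 32 tiles    (X tiled)
	16384 / 128 = 128 tiles   (Y tiled, 32bpp)
	16384 /  64 = 256 chunks  (linear, 64-byte units)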
+
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj)
+{
+ const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+
+ if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+ view = &i915_ggtt_view_rotated;
+
+ return i915_gem_obj_ggtt_offset_view(obj, view);
+}
+
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
@@ -2730,10 +2943,10 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
+ unsigned long surf_addr;
if (!intel_crtc->primary_enabled) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2777,43 +2990,39 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
BUG();
}
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
- DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
- i915_gem_obj_ggtt_offset(obj),
- x, y, fb->width, fb->height,
- fb->pitches[0]);
+ obj = intel_fb_obj(fb);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+ surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config->pipe_src_h - 1) << 16 |
(intel_crtc->config->pipe_src_w - 1));
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
- I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
+ I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -3064,38 +3273,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
-{
- return crtc->base.enabled && crtc->active &&
- crtc->config->has_pch_encoder;
-}
-
-static void ivb_modeset_global_resources(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
- struct intel_crtc *pipe_C_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
- uint32_t temp;
-
- /*
- * When everything is off disable fdi C so that we could enable fdi B
- * with all lanes. Note that we don't care about enabled pipes without
- * an enabled pch encoder.
- */
- if (!pipe_has_enabled_pch(pipe_B_crtc) &&
- !pipe_has_enabled_pch(pipe_C_crtc)) {
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp = I915_READ(SOUTH_CHICKEN1);
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("disabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- }
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -3751,20 +3928,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
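The early return above relies on '!!' to collapse the masked register bit to
0/1 so it can be compared against a bool. The same update-only-if-changed
read-modify-write pattern in a generic, standalone form (a sketch, with a
hypothetical helper name):

	#include <stdbool.h>
	#include <stdint.h>

	static bool update_bit(uint32_t *reg, uint32_t bit, bool enable)
	{
		if (!!(*reg & bit) == enable)
			return false;	/* already as requested, skip RMW */

		*reg &= ~bit;
		if (enable)
			*reg |= bit;

		return true;		/* caller writes the value back */
	}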
@@ -3772,20 +3952,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config->fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ cpt_set_fdi_bc_bifurcation(dev, false);
else
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
case PIPE_C:
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
default:
@@ -4120,6 +4299,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
+/*
+ * Disable a plane internally without actually modifying the plane's state.
+ * This will allow us to easily restore the plane later by just reprogramming
+ * its state.
+ */
+static void disable_plane_internal(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_plane_state *state =
+ plane->funcs->atomic_duplicate_state(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ intel_state->visible = false;
+ intel_plane->commit_plane(plane, intel_state);
+
+ intel_plane_destroy_state(plane, state);
+}
+
static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4129,8 +4326,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- plane->funcs->disable_plane(plane);
+ if (plane->fb && intel_plane->pipe == pipe)
+ disable_plane_internal(plane);
}
}
@@ -4204,7 +4401,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
bool reenable_ips = false;
/* The clocks have to be on to load the palette. */
- if (!crtc->enabled || !intel_crtc->active)
+ if (!crtc->state->enable || !intel_crtc->active)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4288,11 +4485,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- if (dev_priv->fbc.plane == plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
@@ -4318,7 +4514,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4327,7 +4523,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_prepare_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4426,7 +4622,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4435,7 +4631,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4760,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-static void modeset_update_crtc_power_domains(struct drm_device *dev)
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
@@ -4773,7 +4970,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
- if (!crtc->base.enabled)
+ if (!crtc->base.state->enable)
continue;
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4783,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
}
if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ dev_priv->display.modeset_global_resources(state);
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4900,24 +5097,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
switch (cdclk) {
- case 400000:
- cmd = 3;
- break;
case 333333:
case 320000:
- cmd = 2;
- break;
case 266667:
- cmd = 1;
- break;
case 200000:
- cmd = 0;
break;
default:
MISSING_CASE(cdclk);
return;
}
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
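With the new formula the CCK divider follows directly from the HPLL reference.
Worked numbers, assuming a 1.6 GHz HPLL (hpll_freq = 1600000 kHz) and the
kernel's DIV_ROUND_CLOSEST(a, b) = (a + b/2) / b:

	cdclk 320000: (3200000 + 160000) / 320000 = 10  ->  cmd = 9
	cdclk 266667: (3200000 + 133333) / 266667 = 12  ->  cmd = 11
	cdclk 200000: (3200000 + 100000) / 200000 = 16  ->  cmd = 15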
@@ -4937,27 +5133,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
-
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev_priv->dev))
- return 400000;
+ int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320/333MHz (depends on HPLL freq)
- * 400MHz
- * So we check to see whether we're above 90% of the lower bin and
- * adjust if needed.
+ * 400MHz (VLV only)
+ * So we check to see whether we're above 90% (VLV) or 95% (CHV)
+ * of the lower bin and adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (max_pixclk > freq_320*9/10)
+ if (!IS_CHERRYVIEW(dev_priv) &&
+ max_pixclk > freq_320*limit/100)
return 400000;
- else if (max_pixclk > 266667*9/10)
+ else if (max_pixclk > 266667*limit/100)
return freq_320;
else if (max_pixclk > 0)
return 266667;
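The guardband in numbers (taking freq_320 as 320000 kHz): on VLV (limit = 90)
a pixel clock above 266667 * 90 / 100 = 240000 kHz steps up to the 320 MHz
bin, and above 320000 * 90 / 100 = 288000 kHz to 400 MHz; on CHV (limit = 95)
the 400 MHz bin is gone, and the step from 267 MHz to freq_320 happens above
266667 * 95 / 100 = 253333 kHz.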
@@ -4994,12 +5188,49 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
/* disable/enable all currently active pipes while we change cdclk */
for_each_intel_crtc(dev, intel_crtc)
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
-static void valleyview_modeset_global_resources(struct drm_device *dev)
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_31;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ default_credits);
+
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5021,6 +5252,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
else
valleyview_set_cdclk(dev, req_cdclk);
+ vlv_program_pfi_credits(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
@@ -5034,7 +5267,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
bool is_dsi;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5049,7 +5282,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
}
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5117,7 +5350,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5125,7 +5358,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5316,7 +5549,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
/* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
@@ -5394,7 +5627,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
crtc = encoder->base.crtc;
- I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!crtc->state->enable,
+ "crtc not enabled\n");
I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
@@ -5402,6 +5636,34 @@ static void intel_connector_check_state(struct intel_connector *connector)
}
}
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+
+ connector_state = kzalloc(sizeof(*connector_state), GFP_KERNEL);
+ if (!connector_state)
+ return -ENOMEM;
+
+ connector->base.state = connector_state;
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
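The point of the helper (a reading aid, not part of this diff): connectors
allocated with a bare kzalloc() start out with connector->base.state == NULL,
which the atomic code would dereference; the intel_ddi hunks earlier in this
diff switch their allocations over for exactly that reason. Minimal caller
sketch:

	struct intel_connector *intel_connector = intel_connector_alloc();

	if (!intel_connector)
		return NULL;	/* or -ENOMEM, depending on the caller */
	/* intel_connector->base.state now points at a zeroed
	 * drm_connector_state instead of NULL */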
+
/* Even simpler default implementation, if there's really no special case to
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
@@ -5433,13 +5695,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
+static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+
+ if (crtc->base.state->enable &&
+ crtc->config->has_pch_encoder)
+ return crtc->config->fdi_lanes;
+
+ return 0;
+}
+
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
@@ -5466,22 +5736,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
case PIPE_A:
return true;
case PIPE_B:
- if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
- pipe_config->fdi_lanes > 2) {
+ if (pipe_config->fdi_lanes > 2 &&
+ pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
- if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config->fdi_lanes <= 2) {
- if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
- }
- } else {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
@@ -5581,7 +5849,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5615,10 +5883,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
u32 val;
int divider;
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev))
- return 400000;
-
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
@@ -5764,15 +6028,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+ int num_connectors)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
+ WARN_ON(!crtc_state->base.state);
+
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5815,8 +6082,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915.powersave) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
@@ -5884,7 +6151,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5900,13 +6167,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
+ struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+ if (m_n == M1_N1) {
+ dp_m_n = &crtc->config->dp_m_n;
+ dp_m2_n2 = &crtc->config->dp_m2_n2;
+ } else if (m_n == M2_N2) {
+ /*
+ * M2_N2 registers are not supported. Hence m2_n2 divider value
+ * needs to be programmed into M1_N1.
+ */
+ dp_m_n = &crtc->config->dp_m2_n2;
+ } else {
+ DRM_ERROR("Unsupported divider value\n");
+ return;
+ }
+
if (crtc->config->has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
- &crtc->config->dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
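Usage sketch for the new parameter (hedged: the M2_N2 callers live in the DRRS
code and are not part of this diff): the modeset paths above now pass M1_N1
explicitly, while a downclock switch would pass M2_N2 and, per the comment,
have the m2_n2 values routed into the M1_N1 registers:

	intel_dp_set_m_n(intel_crtc, M1_N1);	/* normal modeset */
	intel_dp_set_m_n(intel_crtc, M2_N2);	/* hypothetical DRRS low-refresh switch */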
static void vlv_update_pll(struct intel_crtc *crtc,
@@ -6044,9 +6327,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, intcoeff;
+ u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- int refclk;
+ u32 dpio_val;
+ int vco;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
@@ -6054,6 +6338,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
/*
* Enable Refclk and SSC
@@ -6079,26 +6366,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ if (bestm2_frac)
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
- DPIO_CHV_FRAC_DIV_EN |
- (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
- refclk = i9xx_get_refclk(crtc, 0);
- loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
- 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
- if (refclk == 100000)
- intcoeff = 11;
- else if (refclk == 38400)
- intcoeff = 10;
- else
- intcoeff = 9;
- loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
@@ -6123,6 +6440,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_state pipe_config = {
+ .base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6167,12 +6485,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -6215,7 +6533,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6245,7 +6563,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -6256,10 +6574,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6473,11 +6791,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != &crtc->base)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6496,7 +6823,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ refclk = i9xx_get_refclk(crtc_state, num_connectors);
/*
* Returns a set of divisors for the desired target clock with
@@ -6504,8 +6831,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
@@ -6521,7 +6848,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, &clock,
&reduced_clock);
@@ -6620,7 +6947,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -6636,9 +6963,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -6664,7 +6994,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -6673,7 +7004,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7147,18 +7478,26 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct drm_crtc *crtc)
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- int num_connectors = 0;
+ int num_connectors = 0, i;
bool is_lvds = false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7345,22 +7684,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
+ is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
- refclk = ironlake_get_refclk(crtc);
+ refclk = ironlake_get_refclk(crtc_state);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(intel_crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
@@ -7374,7 +7712,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, intel_crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7407,16 +7745,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
uint32_t dpll;
- int factor, num_connectors = 0;
+ int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
- switch (intel_encoder->type) {
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -7545,7 +7891,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- if (is_lvds && has_reduced_clock && i915.powersave)
+ if (is_lvds && has_reduced_clock)
crtc->lowfreq_avail = true;
else
crtc->lowfreq_avail = false;
@@ -7651,10 +7997,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, base, offset, stride_mult;
+ u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7670,9 +8016,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
if (!(val & PLANE_CTL_ENABLE))
goto error;
- if (val & PLANE_CTL_TILED_MASK)
- plane_config->tiling = I915_TILING_X;
-
pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
@@ -7680,6 +8023,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
plane_config->base = base;
@@ -7690,21 +8053,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- stride_mult = 64;
- break;
- case I915_TILING_X:
- stride_mult = 512;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto error;
- }
+ stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7713,7 +8068,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
return;
error:
@@ -7753,7 +8108,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7769,9 +8124,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -7797,7 +8155,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7806,7 +8165,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -8292,8 +8651,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
uint32_t cntl = 0, size = 0;
if (base) {
- unsigned int width = intel_crtc->cursor_width;
- unsigned int height = intel_crtc->cursor_height;
+ unsigned int width = intel_crtc->base.cursor->state->crtc_w;
+ unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
@@ -8357,7 +8716,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl = 0;
if (base) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
+ switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -8368,7 +8727,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->cursor_width);
+ MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8415,7 +8774,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width <= 0)
+ if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -8424,7 +8783,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height <= 0)
+ if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -8440,8 +8799,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (intel_crtc->cursor_height *
- intel_crtc->cursor_width - 1) * 4;
+ base += (intel_crtc->base.cursor->state->crtc_h *
+ intel_crtc->base.cursor->state->crtc_w - 1) * 4;
}
if (IS_845G(dev) || IS_I865G(dev))
@@ -8633,6 +8992,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL;
+ struct drm_connector_state *connector_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -8680,7 +9041,7 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (possible_crtc->enabled)
+ if (possible_crtc->state->enable)
continue;
/* This can occur when applying the pipe A quirk on resume. */
if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8714,6 +9075,21 @@ retry:
old->load_detect_temp = true;
old->release_fb = NULL;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return false;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
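+ /* Point the probed connector at the chosen crtc/encoder for the load-detect modeset. */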
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &intel_encoder->base;
+
if (!mode)
mode = &load_detect_mode;
@@ -8736,7 +9112,7 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -8749,12 +9125,17 @@ retry:
return true;
fail:
- intel_crtc->new_enabled = crtc->enabled;
+ intel_crtc->new_enabled = crtc->state->enable;
if (intel_crtc->new_enabled)
intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
+ if (state) {
+ drm_atomic_state_free(state);
+ state = NULL;
+ }
+
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
@@ -8764,24 +9145,44 @@ fail_unlock:
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
if (old->load_detect_temp) {
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ goto fail;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ connector_state->best_encoder = NULL;
+ connector_state->crtc = NULL;
+
+ intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+
+ drm_atomic_state_free(state);
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -8794,6 +9195,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ return;
+fail:
+ DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+ drm_atomic_state_free(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9032,6 +9438,8 @@ void intel_mark_busy(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
@@ -9045,9 +9453,6 @@ void intel_mark_idle(struct drm_device *dev)
dev_priv->mm.busy = false;
- if (!i915.powersave)
- goto out;
-
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -9058,7 +9463,6 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
-out:
intel_runtime_pm_put(dev_priv);
}
@@ -9100,9 +9504,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
- drm_gem_object_unreference(&work->old_fb_obj->base);
intel_fbc_update(dev);
@@ -9111,6 +9514,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9627,69 +10031,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
return 0;
}
-static int intel_gen9_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane = 0, stride;
- int ret;
-
- switch(intel_crtc->pipe) {
- case PIPE_A:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
- break;
- case PIPE_B:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
- break;
- case PIPE_C:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
- break;
- case I915_TILING_X:
- stride = fb->pitches[0] >> 9;
- break;
- default:
- WARN_ONCE(1, "unknown tiling in flip command\n");
- return -ENODEV;
- }
-
- ret = intel_ring_begin(ring, 10);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_emit(ring, 0);
-
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
- intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-
- intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
-
- return 0;
-}
-
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9719,10 +10060,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
!i915_gem_request_completed(work->flip_queued_req, true))
return false;
- work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
}
- if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+ if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
@@ -9753,7 +10094,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ intel_crtc->unpin_work->flip_queued_vblank,
+ drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
spin_unlock(&dev->event_lock);
@@ -9805,7 +10147,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = intel_fb_obj(old_fb);
+ work->old_fb = old_fb;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9836,12 +10178,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);
crtc->primary->fb = fb;
@@ -9849,6 +10187,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -9857,7 +10199,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
- if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
@@ -9870,12 +10212,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+ crtc->primary->state, ring);
if (ret)
goto cleanup_pending;
- work->gtt_offset =
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+ work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+ + intel_crtc->dspaddr_offset;
if (use_mmio_flip(ring, obj)) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -9895,10 +10238,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_ring_get_request(ring));
}
- work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
- i915_gem_track_fb(work->old_fb_obj, obj,
+ i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
intel_fbc_disable(dev);
@@ -9910,16 +10253,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ mutex_unlock(&dev->struct_mutex);
+cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- drm_gem_object_unreference(&work->old_fb_obj->base);
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
-cleanup:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ drm_framebuffer_unreference(work->old_fb);
+
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irq(&dev->event_lock);
@@ -9959,8 +10303,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
}
@@ -9971,7 +10314,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = crtc->base.enabled;
+ crtc->new_enabled = crtc->base.state->enable;
if (crtc->new_enabled)
crtc->new_config = crtc->config;
@@ -9980,6 +10323,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
}
+/* Transitional helper to copy current connector/encoder state to
+ * connector->state. This is needed so that code that is partially
+ * converted to atomic does the right thing.
+ */
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder) {
+ connector->base.state->best_encoder =
+ connector->base.encoder;
+ connector->base.state->crtc =
+ connector->base.encoder->crtc;
+ } else {
+ connector->base.state->best_encoder = NULL;
+ connector->base.state->crtc = NULL;
+ }
+ }
+}
+
/**
* intel_modeset_commit_output_state
*
@@ -9991,8 +10355,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->base.encoder = &connector->new_encoder->base;
}
@@ -10001,8 +10364,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ crtc->base.state->enable = crtc->new_enabled;
crtc->base.enabled = crtc->new_enabled;
}
+
+ intel_modeset_update_connector_atomic_state(dev);
}
static void
@@ -10037,8 +10403,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state;
struct intel_connector *connector;
- int bpp;
+ int bpp, i;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
@@ -10078,11 +10445,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
pipe_config->pipe_bpp = bpp;
+ state = pipe_config->base.state;
+
/* Clamp display bpp to EDID value */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (!connector->new_encoder ||
- connector->new_encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector = to_intel_connector(state->connectors[i]);
+ if (state->connector_states[i]->crtc != &crtc->base)
continue;
connected_sink_compute_bpp(connector, pipe_config);
@@ -10207,8 +10578,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
struct intel_encoder *encoder = connector->new_encoder;
if (!encoder)
@@ -10239,15 +10609,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_crtc_state tmp_state;
+
+ /* Clear only the intel specific part of the crtc state */
+ tmp_state = crtc_state->base;
+ memset(crtc_state, 0, sizeof *crtc_state);
+ crtc_state->base = tmp_state;
+}
+
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode,
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
+ int i;
bool retry = true;
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
@@ -10260,10 +10645,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config)
- return ERR_PTR(-ENOMEM);
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+ clear_intel_crtc_state(pipe_config);
+
+ pipe_config->base.crtc = crtc;
drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
drm_mode_copy(&pipe_config->base.mode, mode);
@@ -10318,11 +10706,17 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for_each_intel_encoder(dev, encoder) {
+ for (i = 0; i < state->num_connector; i++) {
+ connector = to_intel_connector(state->connectors[i]);
+ if (!connector)
+ continue;
- if (&encoder->new_crtc->base != crtc)
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
@@ -10358,7 +10752,6 @@ encoder_retry:
return pipe_config;
fail:
- kfree(pipe_config);
return ERR_PTR(ret);
}
@@ -10380,8 +10773,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
@@ -10412,7 +10804,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
/* Check for pipes that will be enabled/disabled ... */
for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.enabled == intel_crtc->new_enabled)
+ if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
continue;
if (!intel_crtc->new_enabled)
@@ -10487,10 +10879,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
/* Double check state. */
for_each_intel_crtc(dev, intel_crtc) {
- WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != intel_crtc->config);
- WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
+ WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10750,7 +11142,7 @@ static void check_wm_state(struct drm_device *dev)
continue;
/* planes */
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
@@ -10784,8 +11176,7 @@ check_connector_state(struct drm_device *dev)
{
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
@@ -10815,8 +11206,7 @@ check_encoder_state(struct drm_device *dev)
I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
enabled = true;
@@ -10877,7 +11267,7 @@ check_crtc_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- I915_STATE_WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10891,9 +11281,10 @@ check_crtc_state(struct drm_device *dev)
I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- I915_STATE_WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.state->enable,
"crtc's computed enabled state doesn't match tracked enabled state "
- "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+ "(expected %i, found %i)\n", enabled,
+ crtc->base.state->enable);
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
@@ -10957,7 +11348,7 @@ check_shared_dpll_state(struct drm_device *dev)
pll->on, active);
for_each_intel_crtc(dev, crtc) {
- if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
@@ -11039,17 +11430,30 @@ static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
+ struct drm_atomic_state *state,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
+ struct drm_device *dev = crtc->dev;
struct intel_crtc_state *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ERR_PTR(ret);
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
- if ((*modeset_pipes) == 0)
- goto out;
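+ /* Pull each pipe being disabled into the atomic state and mark it off. */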
+ for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ pipe_config->base.enable = false;
+ }
/*
* Note this needs changes when we start tracking multiple modes
@@ -11057,15 +11461,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- goto out;
+ for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
+ /* FIXME: For now we still expect modeset_pipes has at most
+ * one bit set. */
+ if (WARN_ON(&intel_crtc->base != crtc))
+ continue;
+
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
}
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
-out:
- return pipe_config;
+ return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
}
static int __intel_set_mode_setup_plls(struct drm_device *dev,
@@ -11109,6 +11519,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
+ struct intel_crtc_state *crtc_state_copy = NULL;
struct intel_crtc *intel_crtc;
int ret = 0;
@@ -11116,6 +11527,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
if (!saved_mode)
return -ENOMEM;
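+ /* pipe_config is freed with the atomic state, so a stable copy of the new crtc state is kept here (see the done: label). */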
+ crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
+ if (!crtc_state_copy) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
*saved_mode = crtc->mode;
if (modeset_pipes)
@@ -11143,7 +11560,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
intel_crtc_disable(&intel_crtc->base);
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
dev_priv->display.crtc_disable(&intel_crtc->base);
}
@@ -11173,7 +11590,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- modeset_update_crtc_power_domains(dev);
+ modeset_update_crtc_power_domains(pipe_config->base.state);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11199,9 +11616,25 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
- if (ret && crtc->enabled)
+ if (ret && crtc->state->enable)
crtc->mode = *saved_mode;
+ if (ret == 0 && pipe_config) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* The pipe_config will be freed with the atomic state, so
+ * make a copy. */
+ memcpy(crtc_state_copy, intel_crtc->config,
+ sizeof *crtc_state_copy);
+ intel_crtc->config = crtc_state_copy;
+ intel_crtc->base.state = &crtc_state_copy->base;
+
+ if (modeset_pipes)
+ intel_crtc->new_config = intel_crtc->config;
+ } else {
+ kfree(crtc_state_copy);
+ }
+
kfree(saved_mode);
return ret;
}
@@ -11227,27 +11660,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct drm_atomic_state *state)
{
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+ ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+out:
+ return ret;
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* The force restore path in the HW readout code relies on the staged
+ * config still keeping the user requested config while the actual
+ * state has been overwritten by the configuration read from HW. We
+ * need to copy the staged config to the atomic state, otherwise the
+ * mode set will just reapply the state the HW is already in. */
+ for_each_intel_encoder(dev, encoder) {
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->new_encoder != encoder)
+ continue;
+
+ connector_state = drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state)) {
+ DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
+ connector->base.base.id,
+ connector->base.name,
+ PTR_ERR(connector_state));
+ continue;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &encoder->base;
+ }
+ }
+
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
+ state);
+
+ drm_atomic_state_free(state);
}
#undef for_each_intel_crtc_masked
@@ -11295,7 +11782,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
*/
count = 0;
for_each_crtc(dev, crtc) {
- config->save_crtc_enabled[count++] = crtc->enabled;
+ config->save_crtc_enabled[count++] = crtc->state->enable;
}
count = 0;
@@ -11336,7 +11823,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
@@ -11416,9 +11903,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config)
+ struct intel_set_config *config,
+ struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
@@ -11428,8 +11917,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -11454,15 +11942,16 @@ intel_modeset_stage_output_state(struct drm_device *dev,
if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.base.id,
+ connector->base.name);
config->mode_changed = true;
}
}
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
struct drm_crtc *new_crtc;
if (!connector->new_encoder)
@@ -11482,6 +11971,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ connector_state->crtc = new_crtc;
+ connector_state->best_encoder = &connector->new_encoder->base;
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
@@ -11491,9 +11988,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
num_connectors++;
@@ -11508,16 +12003,25 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
+ encoder->base.base.id,
+ encoder->base.name);
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (connector->new_encoder)
+ for_each_intel_connector(dev, connector) {
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (connector->new_encoder) {
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
+ } else {
+ connector_state->crtc = NULL;
+ }
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -11529,8 +12033,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
- if (crtc->new_enabled != crtc->base.enabled) {
- DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ if (crtc->new_enabled != crtc->base.state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
+ crtc->base.base.id,
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
@@ -11553,7 +12058,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
@@ -11572,6 +12077,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_mode_set save_set;
+ struct drm_atomic_state *state = NULL;
struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
@@ -11616,12 +12122,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* such cases. */
intel_set_config_compute_mode_changes(set, config);
- ret = intel_modeset_stage_output_state(dev, set, config);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_config;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ ret = intel_modeset_stage_output_state(dev, set, config, state);
if (ret)
goto fail;
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- set->fb,
+ set->fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
@@ -11641,10 +12155,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
*/
}
- /* set_mode will free it in the mode_changed case */
- if (!config->mode_changed)
- kfree(pipe_config);
-
intel_update_pipe_size(to_intel_crtc(set->crtc));
if (config->mode_changed) {
@@ -11690,6 +12200,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
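+ /* Reset the staged atomic state so the restore path below can reuse it. */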
+ drm_atomic_state_clear(state);
+
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
@@ -11702,11 +12214,15 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ save_set.x, save_set.y, save_set.fb,
+ state))
DRM_ERROR("failed to restore config after modeset failure\n");
}
out_config:
+ if (state)
+ drm_atomic_state_free(state);
+
intel_set_config_free(config);
return ret;
}
@@ -11821,6 +12337,28 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
+ */
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling or rotation changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ return false;
+}
+
+/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -11834,7 +12372,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -11868,7 +12407,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
}
if (ret == 0)
@@ -11888,7 +12427,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -11899,7 +12439,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, old_state);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -11944,7 +12484,7 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (intel_crtc->primary_enabled &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.crtc == intel_crtc &&
state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
@@ -11963,6 +12503,9 @@ intel_check_primary_plane(struct drm_plane *plane,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
intel_crtc->atomic.update_fbc = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
}
return 0;
@@ -11977,8 +12520,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
crtc = crtc ? crtc : plane->crtc;
@@ -11988,8 +12529,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->obj = obj;
-
if (intel_crtc->active) {
if (state->visible) {
/* FIXME: kill this fastboot hack */
@@ -12229,17 +12768,14 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
finish:
if (intel_crtc->active) {
- if (intel_crtc->cursor_width != state->base.crtc_w)
+ if (plane->state->crtc_w != state->base.crtc_w)
intel_crtc->atomic.update_wm = true;
intel_crtc->atomic.fb_bits |=
@@ -12256,7 +12792,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
@@ -12267,8 +12802,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc->cursor_x = state->base.crtc_x;
crtc->cursor_y = state->base.crtc_y;
- intel_plane->obj = obj;
-
if (intel_crtc->cursor_bo == obj)
goto update;
@@ -12282,8 +12815,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
update:
- intel_crtc->cursor_width = state->base.crtc_w;
- intel_crtc->cursor_height = state->base.crtc_h;
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, state->visible);
@@ -12353,6 +12884,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (!crtc_state)
goto fail;
intel_crtc_set_state(intel_crtc, crtc_state);
+ crtc_state->base.crtc = &intel_crtc->base;
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
@@ -12430,9 +12962,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
if (!drmmode_crtc) {
@@ -12502,7 +13031,6 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12513,10 +13041,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_DDI(dev)) {
int found;
- /* Haswell uses DDI functions to detect digital outputs */
+ /*
+ * Haswell uses DDI functions to detect digital outputs.
+ * On SKL pre-D0 the strap isn't connected, so we assume
+ * it's there.
+ */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
- /* DDI A only supports eDP */
- if (found)
+ /* WaIgnoreDDIAStrap: skl */
+ if (found ||
+ (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -12633,37 +13166,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- /*
- * FIXME: We don't have full atomic support yet, but we want to be
- * able to enable/test plane updates via the atomic interface in the
- * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core
- * will take some atomic codepaths to lookup properties during
- * drmModeGetConnector() that unconditionally dereference
- * connector->state.
- *
- * We create a dummy connector state here for each connector to ensure
- * the DRM core doesn't try to dereference a NULL connector->state.
- * The actual connector properties will never be updated or contain
- * useful information, but since we're doing this specifically for
- * testing/debug of the plane operations (and only when a specific
- * kernel module option is given), that shouldn't really matter.
- *
- * Once atomic support for crtc's + connectors lands, this loop should
- * be removed since we'll be setting up real connector state, which
- * will contain Intel-specific properties.
- */
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (!WARN_ON(connector->state)) {
- connector->state =
- kzalloc(sizeof(*connector->state),
- GFP_KERNEL);
- }
- }
- }
-
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12705,52 +13207,100 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
+static
+u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 gen = INTEL_INFO(dev)->gen;
+
+ if (gen >= 9) {
+ /* "The stride in bytes must not exceed the of the size of 8K
+ * pixels and 32K bytes."
+ */
+ return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
+ return 32*1024;
+ } else if (gen >= 4) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 16*1024;
+ else
+ return 32*1024;
+ } else if (gen >= 3) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ /* XXX DSPC is limited to 4k tiled */
+ return 8*1024;
+ }
+}
+
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
- int aligned_height;
- int pitch_limit;
+ unsigned int aligned_height;
int ret;
+ u32 pitch_limit, stride_alignment;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->tiling_mode == I915_TILING_Y) {
- DRM_DEBUG("hardware does not support tiling Y\n");
- return -EINVAL;
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /* Enforce that fb modifier and tiling mode match, but only for
+ * X-tiled. This is needed for FBC. */
+ if (!!(obj->tiling_mode == I915_TILING_X) !=
+ !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+ DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (obj->tiling_mode == I915_TILING_X)
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ else if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
}
- if (mode_cmd->pitches[0] & 63) {
- DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
- mode_cmd->pitches[0]);
+ /* Passed in modifier sanity checking. */
+ switch (mode_cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (INTEL_INFO(dev)->gen < 9) {
+ DRM_DEBUG("Unsupported tiling 0x%llx!\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
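+ /* fall through */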
+ case DRM_FORMAT_MOD_NONE:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
+ mode_cmd->modifier[0]);
return -EINVAL;
}
- if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode)
- pitch_limit = 16*1024;
- else
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 3) {
- if (obj->tiling_mode)
- pitch_limit = 8*1024;
- else
- pitch_limit = 16*1024;
- } else
- /* XXX DSPC is limited to 4k tiled */
- pitch_limit = 8*1024;
+ stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
+ if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+ DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
+ mode_cmd->pitches[0], stride_alignment);
+ return -EINVAL;
+ }
+ pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
- DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
- obj->tiling_mode ? "tiled" : "linear",
+ DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ "tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
- if (obj->tiling_mode != I915_TILING_NONE &&
+ if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
@@ -12805,7 +13355,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
aligned_height = intel_fb_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12958,8 +13509,6 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
} else if (IS_VALLEYVIEW(dev)) {
@@ -12967,9 +13516,6 @@ static void intel_init_display(struct drm_device *dev)
valleyview_modeset_global_resources;
}
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
-
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -12992,8 +13538,10 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
- dev_priv->display.queue_flip = intel_gen9_queue_flip;
- break;
+ /* Fall through - unsupported since gen9 is execlist-only. */
+ default:
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
}
intel_panel_init_backlight_funcs(dev);
@@ -13212,6 +13760,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
+
dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
@@ -13254,7 +13804,7 @@ void intel_modeset_init(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
@@ -13295,7 +13845,7 @@ void intel_modeset_init(struct drm_device *dev)
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
- intel_find_plane_obj(crtc, &crtc->plane_config);
+ intel_find_initial_plane_obj(crtc, &crtc->plane_config);
}
}
}
@@ -13310,9 +13860,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
@@ -13323,7 +13871,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
return;
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp);
+ intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
@@ -13357,11 +13905,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
+ drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
update_scanline_offset(crtc);
- drm_vblank_on(dev, crtc->pipe);
- } else
- drm_vblank_off(dev, crtc->pipe);
+ drm_crtc_vblank_on(&crtc->base);
+ }
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
@@ -13383,8 +13931,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->plane = plane;
/* ... and break all links. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
@@ -13393,14 +13940,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head)
+ for_each_intel_connector(dev, connector)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
}
WARN_ON(crtc->active);
+ crtc->base.state->enable = false;
crtc->base.enabled = false;
}
@@ -13417,7 +13964,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* have active connectors/encoders. */
intel_crtc_update_dpms(&crtc->base);
- if (crtc->active != crtc->base.enabled) {
+ if (crtc->active != crtc->base.state->enable) {
struct intel_encoder *encoder;
/* This can happen either due to bugs in the get_hw_state
@@ -13425,9 +13972,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
- crtc->base.enabled ? "enabled" : "disabled",
+ crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
/* Because we only establish the connector -> encoder ->
@@ -13496,9 +14044,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder != encoder)
continue;
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -13564,6 +14110,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13612,8 +14159,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
@@ -13669,6 +14215,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
"[setup_hw_state]");
}
+ intel_modeset_update_connector_atomic_state(dev);
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -13697,8 +14245,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_crtc_restore_mode(crtc);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13712,6 +14259,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
+ int ret;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -13736,15 +14284,18 @@ void intel_modeset_gem_init(struct drm_device *dev)
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
- mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(c->primary,
- c->primary->fb,
- NULL)) {
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ c->primary->state,
+ NULL);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13752,7 +14303,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
update_state_fb(c->primary);
}
}
- mutex_unlock(&dev->struct_mutex);
intel_backlight_register(dev);
}
@@ -13793,8 +14343,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_fbc_disable(dev);
- ironlake_teardown_rc6(dev);
-
mutex_unlock(&dev->struct_mutex);
/* flush any delayed tasks or pending work */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a74aaf9242b9..d0237102c27e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -84,6 +84,13 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+/* Skylake supports the following link rates, in kHz */
+static const int gen9_rates[] = { 162000, 216000, 270000,
+ 324000, 432000, 540000 };
+static const int chv_rates[] = { 162000, 202500, 210000, 216000,
+ 243000, 270000, 324000, 405000,
+ 420000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -118,23 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
-int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
- struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
- break;
- case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
- INTEL_INFO(dev)->gen >= 8) &&
- intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
- max_link_bw = DP_LINK_BW_5_4;
- else
- max_link_bw = DP_LINK_BW_2_7;
+ case DP_LINK_BW_5_4:
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -210,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
- max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
@@ -240,7 +239,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
return v;
}
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -943,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = msg->request << 4;
- txbuf[1] = msg->address >> 8;
+ txbuf[0] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
@@ -952,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 1;
+ rxsize = 2; /* 0 or 1 data bytes */
if (WARN_ON(txsize > 20))
return -E2BIG;
@@ -963,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
- /* Return payload size. */
- ret = msg->size;
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
}
break;
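The reworked header packing above carries the full 20-bit DPCD address (its top four bits share txbuf[0] with the request nibble), and rxsize grows to 2 so a short write can report how many bytes the sink actually accepted. A minimal standalone sketch of the same packing, with hypothetical names:

#include <stdint.h>
#include <stddef.h>

/* Pack a DP AUX write header the same way as the hunk above (sketch,
 * not the driver's code): request nibble in the high bits of byte 0,
 * the 20-bit address split across bytes 0..2, size-1 in byte 3. */
static void aux_pack_header(uint8_t txbuf[4], uint8_t request,
			    uint32_t address, size_t size)
{
	txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
	txbuf[1] = (address >> 8) & 0xff;
	txbuf[2] = address & 0xff;
	txbuf[3] = size - 1;	/* the wire encodes length minus one */
}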
@@ -1075,7 +1080,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
u32 ctrl1;
@@ -1084,19 +1089,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (link_bw) {
- case DP_LINK_BW_1_62:
+ switch (link_clock / 2) {
+ case 81000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
- case DP_LINK_BW_2_7:
+ case 135000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
- case DP_LINK_BW_5_4:
+ case 270000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
+ case 162000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ SKL_DPLL0);
+ break;
+	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640,
+	which results in a CDCLK change. The CDCLK change needs to be
+	handled by disabling the pipes and re-enabling them */
+ case 108000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ SKL_DPLL0);
+ break;
+
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
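skl_edp_set_pll_config() now takes the port clock in kHz instead of a DP bandwidth code. The switch divides by two because the DPLL_CTRL1 rate encodings are named after the PLL frequency in MHz, which is half the DP bit rate; the 1080 and 2160 cases are the ones needing the 8640 VCO flagged in the TBD comment. A table restating the mapping (a sketch, values as implied by the cases above):

/* Standalone restatement of the mapping: half the port clock (kHz)
 * selects a DPLL_CTRL1 link-rate encoding named after the PLL MHz. */
struct skl_rate_map { int half_clock_khz; int pll_mhz; };

static const struct skl_rate_map skl_rate_map[] = {
	{  81000,  810 },	/* 1.62 GHz link (RBR)  */
	{ 108000, 1080 },	/* 2.16 GHz link        */
	{ 135000, 1350 },	/* 2.70 GHz link (HBR)  */
	{ 162000, 1620 },	/* 3.24 GHz link        */
	{ 216000, 2160 },	/* 4.32 GHz link        */
	{ 270000, 2700 },	/* 5.40 GHz link (HBR2) */
};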
@@ -1117,6 +1138,42 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
}
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
+static int
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+{
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *source_rates = gen9_rates;
+ return ARRAY_SIZE(gen9_rates);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *source_rates = chv_rates;
+ return ARRAY_SIZE(chv_rates);
+ }
+
+ *source_rates = default_rates;
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ /* WaDisableHBR2:skl */
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+ else if (INTEL_INFO(dev)->gen >= 8 ||
+ (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ return (DP_LINK_BW_5_4 >> 3) + 1;
+ else
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+}
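Both helpers reuse the old DPCD bandwidth codes as implicit lengths into default_rates: the codes 0x06, 0x0a and 0x14 shifted right by three give 0, 1 and 2, so (code >> 3) + 1 is the number of leading entries of { 162000, 270000, 540000 } that are usable. A quick check of the arithmetic:

#include <assert.h>

/* The (bw_code >> 3) + 1 idiom spelled out: DPCD bandwidth codes are
 * 0x06 (1.62 Gbps), 0x0a (2.7 Gbps), 0x14 (5.4 Gbps). */
int main(void)
{
	assert(((0x06 >> 3) + 1) == 1);	/* rates up to 162000 */
	assert(((0x0a >> 3) + 1) == 2);	/* rates up to 270000 */
	assert(((0x14 >> 3) + 1) == 3);	/* rates up to 540000 */
	return 0;
}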
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config, int link_bw)
@@ -1150,6 +1207,113 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
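intersect_rates() is a standard two-pointer merge over two ascending arrays; it relies on both the source and sink rate tables being sorted, which holds for the static tables above and for DPCD-reported rates. A self-contained sketch of the same walk (hypothetical main, illustrative rates):

#include <stdio.h>

#define MAX_RATES 8

static int intersect(const int *a, int alen, const int *b, int blen,
		     int *out)
{
	int i = 0, j = 0, k = 0;

	while (i < alen && j < blen) {
		if (a[i] == b[j]) {
			if (k < MAX_RATES)
				out[k++] = a[i];	/* common rate */
			i++, j++;
		} else if (a[i] < b[j]) {
			i++;	/* advance the smaller side */
		} else {
			j++;
		}
	}
	return k;
}

int main(void)
{
	const int source[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	const int sink[]   = { 162000, 270000, 540000 };
	int common[MAX_RATES];
	int n = intersect(source, 6, sink, 3, common);

	for (int i = 0; i < n; i++)
		printf("%d\n", common[i]);	/* 162000 270000 540000 */
	return 0;
}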
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(dev, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+ const int *array, int nelem)
+{
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < nelem; i++) {
+ int r = snprintf(str, len, "%d,", array[i]);
+ if (r >= len)
+ return;
+ str += r;
+ len -= r;
+ }
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len, common_len;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ char str[128]; /* FIXME: too big for stack? */
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ source_len = intel_dp_source_rates(dev, &source_rates);
+ snprintf_int_array(str, sizeof(str), source_rates, source_len);
+ DRM_DEBUG_KMS("source rates: %s\n", str);
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+ DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
+{
+ int i = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
+ if (find == rates[i])
+ break;
+
+ return i;
+}
+
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+ int rates[DP_MAX_SUPPORTED_RATES] = {};
+ int len;
+
+ len = intel_dp_common_rates(intel_dp, rates);
+ if (WARN_ON(len <= 0))
+ return 162000;
+
+ return rates[rate_to_index(0, rates) - 1];
+}
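Note the idiom in intel_dp_max_link_rate(): the common rates are built in ascending order into a zero-initialized array, so rate_to_index(0, rates) finds the first unused slot, i.e. the element count, and the entry just before it is the highest common rate. An illustration of that trick in isolation:

#include <assert.h>

/* Mirror of the rate_to_index(0, rates) idiom: in a zero-filled,
 * ascending rate array the index of the first 0 is the element
 * count, so the entry before it is the maximum rate. */
static int max_rate(const int *rates, int max_len)
{
	int i = 0;

	while (i < max_len && rates[i] != 0)
		i++;
	return i > 0 ? rates[i - 1] : 0;
}

int main(void)
{
	int rates[8] = { 162000, 270000, 540000 };	/* tail stays 0 */
	assert(max_rate(rates, 8) == 540000);
	return 0;
}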
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+ return rate_to_index(rate, intel_dp->sink_rates);
+}
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1159,17 +1323,25 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
- int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
+ int max_clock;
int bpp, mode_rate;
- static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
+ int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+ int common_len;
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+
+ /* No common link rates between source and sink */
+ WARN_ON(common_len <= 0);
+
+ max_clock = common_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1193,8 +1365,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock],
+ "max bw %d pixel clock %iKHz\n",
+ max_lane_count, common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1223,8 +1395,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
bpp);
for (clock = min_clock; clock <= max_clock; clock++) {
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ for (lane_count = min_lane_count;
+ lane_count <= max_lane_count;
+ lane_count <<= 1) {
+
+ link_clock = common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
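The rewritten loop takes link_clock straight from the common-rate table instead of decoding a bandwidth code on every iteration. The fit test it feeds is plain bandwidth arithmetic; as a rough worked example (assuming the usual 8b/10b coding overhead, numbers illustrative, not the driver's helper functions):

#include <stdio.h>

/* Back-of-the-envelope link budget: a DP lane at bit rate R carries
 * R * 8/10 of payload after 8b/10b coding. */
int main(void)
{
	double pixel_clock_mhz = 148.5;	/* 1080p60 */
	int bpp = 24, lanes = 2;
	double link_rate_gbps = 2.7;	/* HBR, i.e. port_clock 270000 */

	double need = pixel_clock_mhz * bpp / 1000.0;		/* 3.564 Gbps */
	double have = link_rate_gbps * lanes * 8.0 / 10.0;	/* 4.320 Gbps */

	printf("need %.3f Gbps, have %.3f Gbps -> %s\n", need, have,
	       need <= have ? "fits" : "try more lanes or a higher rate");
	return 0;
}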
@@ -1253,10 +1428,20 @@ found:
if (intel_dp->color_range)
pipe_config->limited_color_range = true;
- intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select =
+ intel_dp_rate_select(intel_dp, common_rates[clock]);
+ } else {
+ intel_dp->link_bw =
+ drm_dp_link_rate_to_bw_code(common_rates[clock]);
+ intel_dp->rate_select = 0;
+ }
+
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = common_rates[clock];
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -1279,7 +1464,7 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ skl_edp_set_pll_config(pipe_config, common_rates[clock]);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -2557,11 +2742,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
/* Program Tx lane latency optimal setting*/
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -2691,11 +2871,14 @@ static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2902,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
@@ -3201,6 +3386,9 @@ intel_hsw_signal_levels(uint8_t train_set)
return DDI_BUF_TRANS_SELECT(7);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(8);
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DDI_BUF_TRANS_SELECT(9);
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
@@ -3358,6 +3546,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ if (intel_dp->num_sink_rates)
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &intel_dp->rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
@@ -3570,6 +3761,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint8_t rev;
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3601,6 +3793,32 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
} else
intel_dp->use_tps3 = false;
+ /* Intermediate frequency support */
+ if (is_edp(intel_dp) &&
+ (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (rev >= 0x03)) { /* eDP 1.4 or higher */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
+ intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SUPPORTED_LINK_RATES,
+ sink_rates,
+ sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ intel_dp->sink_rates[i] = val * 200;
+ }
+ intel_dp->num_sink_rates = i;
+ }
+
+ intel_dp_print_rates(intel_dp);
+
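DP_SUPPORTED_LINK_RATES holds up to eight little-endian 16-bit entries in units of 200 kHz, terminated by a zero entry, hence the val * 200 conversion into the driver's kHz convention. A small decoding sketch (raw values illustrative, endian swap already done):

#include <stdio.h>
#include <stdint.h>

/* Decode an eDP 1.4 DP_SUPPORTED_LINK_RATES table the way the hunk
 * above does: each entry is in units of 200 kHz, zero ends the list. */
int main(void)
{
	uint16_t raw[8] = { 810, 1350, 2700, 0 };	/* host-endian here */
	int khz[8], n = 0;

	for (int i = 0; i < 8 && raw[i]; i++)
		khz[n++] = raw[i] * 200;

	for (int i = 0; i < n; i++)
		printf("%d kHz\n", khz[i]);	/* 162000, 270000, 540000 */
	return 0;
}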
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -3803,7 +4021,7 @@ go_again:
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-void
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4390,6 +4608,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4736,6 +4955,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
+/**
+ * intel_dp_set_drrs_state - program registers for RR switch to take effect
+ * @dev: DRM device
+ * @refresh_rate: RR to be programmed
+ *
+ * This function gets called when refresh rate (RR) has to be changed from
+ * one frequency to another. Switches can be between high and low RR
+ * supported by the panel or to any other RR based on media playback (in
+ * this case, RR value needs to be passed from user space).
+ *
+ * The caller of this function needs to take a lock on dev_priv->drrs.
+ */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4764,7 +4995,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = encoder->new_crtc;
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -4793,14 +5024,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+ switch (index) {
+ case DRRS_HIGH_RR:
+ intel_dp_set_m_n(intel_crtc, M1_N1);
+ break;
+ case DRRS_LOW_RR:
+ intel_dp_set_m_n(intel_crtc, M2_N2);
+ break;
+ case DRRS_MAX_RR:
+ default:
+ DRM_ERROR("Unsupported refreshrate type\n");
+ }
+ } else if (INTEL_INFO(dev)->gen > 6) {
reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
+
if (index > DRRS_HIGH_RR) {
- val |= PIPECONF_EDP_RR_MODE_SWITCH;
- intel_dp_set_m_n(intel_crtc);
+ if (IS_VALLEYVIEW(dev))
+ val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ if (IS_VALLEYVIEW(dev))
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
@@ -4810,6 +5059,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
+/**
+ * intel_edp_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4837,6 +5092,11 @@ unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ *
+ */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4892,10 +5152,20 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
downclock_mode->vrefresh);
unlock:
-
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_invalidate - Invalidate DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is a disturbance on screen (due to cursor movement, time
+ * updates, etc.), DRRS needs to be invalidated, i.e. we need to switch
+ * to the high RR.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4903,15 +5173,21 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- cancel_delayed_work_sync(&dev_priv->drrs.work);
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
@@ -4923,6 +5199,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_flush - Flush DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is no movement on screen, DRRS work can be scheduled.
+ * This DRRS work is responsible for setting relevant registers after a
+ * timeout of 1 second.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4930,16 +5217,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
@@ -4947,6 +5239,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates,
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of two types: static and seamless.
+ * Static DRRS involves changing the refresh rate (RR) by doing a full
+ * modeset (may appear as a blink on screen) and is used in dock/undock
+ * scenarios.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * eDP DRRS:
+ * The implementation is based on the frontbuffer tracking infrastructure.
+ * When there is a disturbance on the screen triggered by user activity or a
+ * periodic system activity, DRRS is disabled (RR is changed to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a switch
+ * to low RR is made.
+ * For integration with frontbuffer tracking code,
+ * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
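The contract the DOC comment describes reduces to a small state machine: invalidate forces the high refresh rate immediately, flush arms a one-second timer that drops to the low rate if nothing dirties the frontbuffer in the meantime. A toy model of that protocol (names and structure illustrative, locking and per-pipe bookkeeping elided):

#include <stdbool.h>
#include <stdio.h>

enum rr { HIGH_RR, LOW_RR };

struct drrs_model {
	enum rr state;
	bool timer_armed;
};

static void drrs_invalidate(struct drrs_model *d)
{
	d->timer_armed = false;		/* cancel_delayed_work() */
	d->state = HIGH_RR;
}

static void drrs_flush(struct drrs_model *d)
{
	d->timer_armed = true;		/* schedule_delayed_work(1s) */
}

static void drrs_timer_fires(struct drrs_model *d)
{
	if (d->timer_armed)
		d->state = LOW_RR;
}

int main(void)
{
	struct drrs_model d = { HIGH_RR, false };

	drrs_flush(&d);			/* screen went idle */
	drrs_timer_fires(&d);
	printf("%s\n", d.state == LOW_RR ? "LOW_RR" : "HIGH_RR");

	drrs_invalidate(&d);		/* cursor moved */
	printf("%s\n", d.state == LOW_RR ? "LOW_RR" : "HIGH_RR");
	return 0;
}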
+/**
+ * intel_dp_drrs_init - Init basic DRRS work and mutex.
+ * @intel_connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS stuff.
+ *
+ * Returns:
+ * Downclock mode if panel supports it, else return NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
@@ -4956,6 +5298,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
+ mutex_init(&dev_priv->drrs.mutex);
+
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
@@ -4970,14 +5315,10 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
(dev, fixed_mode, connector);
if (!downclock_mode) {
- DRM_DEBUG_KMS("DRRS not supported\n");
+ DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
}
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
-
- mutex_init(&dev_priv->drrs.mutex);
-
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
@@ -5000,8 +5341,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
-
if (!is_edp(intel_dp))
return true;
@@ -5251,7 +5590,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9f67a379a9a5..5cb47482d29f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- int bpp;
- int lane_count, slots;
+ struct drm_atomic_state *state;
+ int bpp, i;
+ int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL, *intel_connector;
+ struct intel_connector *found = NULL;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+ rate = intel_dp_max_link_rate(intel_dp);
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+ } else {
+ intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+ intel_dp->rate_select = 0;
+ }
+
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = rate;
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
- if (intel_connector->new_encoder == encoder) {
- found = intel_connector;
+ state = pipe_config->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ if (state->connector_states[i]->best_encoder == &encoder->base) {
+ found = to_intel_connector(state->connectors[i]);
break;
}
}
@@ -140,7 +155,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, intel_connector) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
@@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -399,7 +415,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_connector *connector;
int i;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eef79ccd0b7c..897f17db08af 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,9 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
-
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+#include <drm/drm_atomic.h>
/**
* _wait_for - magic (register) wait macro
@@ -56,8 +54,8 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
+ if ((W) && drm_can_sleep()) { \
+ usleep_range((W)*1000, (W)*2000); \
} else { \
cpu_relax(); \
} \
@@ -258,6 +256,7 @@ struct intel_plane_state {
};
struct intel_initial_plane_config {
+ struct intel_framebuffer *fb;
unsigned int tiling;
int size;
u32 base;
@@ -463,7 +462,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
@@ -500,16 +498,20 @@ struct intel_plane_wm_parameters {
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
+ u64 tiling;
+ unsigned int rotation;
};
struct intel_plane {
struct drm_plane base;
int plane;
enum pipe pipe;
- struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
+ /* FIXME convert to properties */
+ struct drm_intel_sprite_colorkey ckey;
+
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
@@ -526,7 +528,6 @@ struct intel_plane {
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -537,10 +538,6 @@ struct intel_plane {
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
- int (*update_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
- void (*get_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
};
struct intel_watermark_params {
@@ -563,6 +560,7 @@ struct cxsr_latency {
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
@@ -592,6 +590,26 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
+/*
+ * enum link_m_n_set:
+ * When the platform provides two sets of M_N registers for dp, we can
+ * program them and switch between them in case of DRRS.
+ * But when only one such register is provided, we have to program the
+ * required divider value on that register itself based on the DRRS state.
+ *
+ * M1_N1 : Program dp_m_n on M1_N1 registers
+ * dp_m2_n2 on M2_N2 registers (If supported)
+ *
+ * M2_N2 : Program dp_m2_n2 on M1_N1 registers
+ * M2_N2 registers are not supported
+ */
+
+enum link_m_n_set {
+ /* Sets the m1_n1 and m2_n2 */
+ M1_N1 = 0,
+ M2_N2
+};
+
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -601,10 +619,14 @@ struct intel_dp {
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
+ uint8_t rate_select;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+ uint8_t num_sink_rates;
+ int sink_rates[DP_MAX_SUPPORTED_RATES];
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -710,7 +732,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
struct drm_crtc *crtc;
- struct drm_i915_gem_object *old_fb_obj;
+ struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
@@ -817,7 +839,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -852,7 +875,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -877,10 +901,14 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
-int intel_fb_align_height(struct drm_device *dev, int height,
- unsigned int tiling);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+ unsigned int height,
+ uint32_t pixel_format,
+ uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@@ -899,6 +927,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
+int intel_connector_init(struct intel_connector *);
+struct intel_connector *intel_connector_alloc(void);
void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
@@ -928,11 +958,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined);
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -942,9 +973,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
@@ -954,6 +987,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val);
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -993,7 +1039,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
@@ -1008,6 +1054,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1017,7 +1066,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
@@ -1032,17 +1080,11 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
-int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
void intel_edp_drrs_disable(struct intel_dp *intel_dp);
@@ -1097,7 +1139,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1213,8 +1259,9 @@ void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
-void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1231,14 +1278,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1261,6 +1303,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_PTR(PTR_ERR(crtc_state));
+
+ return to_intel_crtc_state(crtc_state);
+}
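The new inline helper only narrows the return type of drm_atomic_get_crtc_state(), propagating its error pointer on failure (typically -EDEADLK when lock acquisition has to back off and the atomic transaction must be retried). A hedged sketch of a caller, with a hypothetical function name:

/* Hypothetical caller of the helper above; real callers sit in the
 * atomic check paths and must check IS_ERR() before dereferencing. */
static int example_check(struct drm_atomic_state *state,
			 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);	/* e.g. -EDEADLK, retry */

	/* ... inspect or modify crtc_state ... */
	return 0;
}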
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 10ab68457ca8..51966426addf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), val);
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -1006,7 +1007,7 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_dsi)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dsi);
return;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
deleted file mode 100644
index 886779030f1a..000000000000
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula <jani.nikula@intel.com>
- */
-
-#ifndef _INTEL_DSI_DSI_H
-#define _INTEL_DSI_DSI_H
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
-
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
- enum port port);
-
-#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index d8579510beb0..770040ff486e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
@@ -468,7 +469,7 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_dvo)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dvo);
return;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 624d1d92d284..4165ce0644f7 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -78,7 +78,8 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
dev_priv->fbc.enabled = true;
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ /* Note: fbc.threshold == 1 for i8xx */
+ cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -173,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void snb_fbc_blit_update(struct drm_device *dev)
+static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * his param in other platforms for now */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ POSTING_READ(MSG_FBC_REND_STATE);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -238,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
}
+ intel_fbc_nuke(dev_priv);
+
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -319,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
+ intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -339,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->fbc.enabled;
}
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -368,7 +338,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
@@ -459,7 +429,7 @@ void intel_fbc_disable(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
+ dev_priv->fbc.crtc = NULL;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
@@ -472,6 +442,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
return true;
}
+static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+{
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ enum pipe pipe;
+ bool pipe_a_only = false, one_pipe_only = false;
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ pipe_a_only = true;
+ else if (INTEL_INFO(dev_priv)->gen <= 4)
+ one_pipe_only = true;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (one_pipe_only && crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ return NULL;
+ }
+ crtc = tmp_crtc;
+ }
+
+ if (pipe_a_only)
+ break;
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ return NULL;
+ }
+
+ return crtc;
+}
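The helper encodes FBC's per-platform pipe constraints: on HSW and gen8+ FBC is tied to pipe A only, while on gen4 and older it bails if more than one pipe is active. A toy restatement of that selection policy (inputs illustrative, primary-plane checks elided):

#include <stdbool.h>
#include <stdio.h>

static int fbc_pick_pipe(const bool active[3], bool pipe_a_only,
			 bool one_pipe_only)
{
	int picked = -1;

	for (int pipe = 0; pipe < 3; pipe++) {
		if (active[pipe]) {
			if (one_pipe_only && picked >= 0)
				return -1;	/* multiple pipes: no FBC */
			picked = pipe;
		}
		if (pipe_a_only)
			break;			/* only consider pipe A */
	}
	return picked;
}

int main(void)
{
	bool active[3] = { true, true, false };

	printf("%d\n", fbc_pick_pipe(active, true, false));	/* 0 */
	printf("%d\n", fbc_pick_pipe(active, false, true));	/* -1 */
	return 0;
}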
+
/**
* intel_fbc_update - enable/disable FBC as needed
* @dev: the drm_device
@@ -494,22 +501,30 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
void intel_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ if (!HAS_FBC(dev))
return;
+
+ /* disable framebuffer compression in vGPU */
+ if (intel_vgpu_active(dev))
+ i915.enable_fbc = 0;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
}
- if (!i915.powersave) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
+ goto out_disable;
}
/*
@@ -521,39 +536,15 @@ void intel_fbc_update(struct drm_device *dev)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
+ crtc = intel_fbc_find_crtc(dev_priv);
+ if (!crtc)
goto out_disable;
- }
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
@@ -617,7 +608,7 @@ void intel_fbc_update(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->fbc.plane == intel_crtc->plane &&
+ if (dev_priv->fbc.crtc == intel_crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
@@ -663,6 +654,44 @@ out_disable:
i915_gem_stolen_cleanup_compression(dev);
}
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned int fbc_bits;
+
+ if (origin == ORIGIN_GTT)
+ return;
+
+ if (dev_priv->fbc.enabled)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
+ else if (dev_priv->fbc.fbc_work)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
+ to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ else
+ fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+
+ dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+
+ if (dev_priv->fbc.busy_bits)
+ intel_fbc_disable(dev);
+}
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->fbc.busy_bits)
+ return;
+
+ dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+
+ if (!dev_priv->fbc.busy_bits)
+ intel_fbc_update(dev);
+}
+
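intel_fbc_invalidate()/intel_fbc_flush() apply the same frontbuffer protocol as DRRS above, reduced to a bitmask: any overlap between the dirtied bits and the bits FBC currently cares about disables compression, and FBC is only re-evaluated once every busy bit has retired. A toy sketch of the mask bookkeeping (illustrative values):

#include <stdio.h>

/* Toy model of the busy_bits handling above: bit i stands for one
 * frontbuffer plane, fbc_bits for the plane FBC is bound to. */
int main(void)
{
	unsigned int fbc_bits = 1u << 0;	/* FBC on pipe A primary */
	unsigned int busy_bits = 0;
	unsigned int dirtied = 1u << 0;		/* CPU write to pipe A */

	busy_bits |= (fbc_bits & dirtied);	/* invalidate */
	if (busy_bits)
		printf("FBC disabled\n");

	busy_bits &= ~dirtied;			/* flush */
	if (!busy_bits)
		printf("FBC re-evaluated\n");
	return 0;
}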
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -671,11 +700,22 @@ out_disable:
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
+ dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->fbc.possible_framebuffer_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(pipe);
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ break;
+ }
+
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3001a8674611..4e7e7da2e03b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info)
return ret;
}
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_blank(blank, info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = {
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
+ .fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
@@ -126,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
@@ -594,7 +619,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
- plane_config->tiling);
+ fb->base.pixel_format,
+ fb->base.modifier[0]);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 73cb6e036445..a20cffb78c0f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -110,16 +110,11 @@ static void intel_mark_fb_busy(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- if (!i915.powersave)
- return;
-
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
}
}
@@ -127,6 +122,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +131,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -158,6 +155,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
/**
@@ -185,16 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (dev_priv->fbc.need_sw_cache_clean) {
- dev_priv->fbc.need_sw_cache_clean = false;
- bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
- }
+ intel_fbc_flush(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 995c5b261f4f..bfbe07b6ddce 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
+ int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
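
For reference, a simplified sketch (not part of the patch) of the connector-state walk the reworked hdmi_12bpc_possible() performs; target_crtc stands in for crtc_state->base.crtc and locking is omitted:

static int count_encoders_on_crtc(struct drm_atomic_state *state,
				  struct drm_crtc *target_crtc)
{
	int i, count = 0;

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector_state *cs;

		if (!state->connectors[i])
			continue;

		cs = state->connector_states[i];
		if (cs->crtc != target_crtc)
			continue;

		/* cs->best_encoder is the encoder driving target_crtc */
		count++;
	}

	return count;
}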
@@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(encoder->new_crtc)) {
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1504,11 +1515,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -1618,6 +1624,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -1743,7 +1750,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8d3da9f3373..fcb074bd55dc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -254,8 +254,10 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
{
+ struct drm_device *dev = ring->dev;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
@@ -272,6 +274,13 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
+ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+ if (IS_GEN9(dev) &&
+ INTEL_REVID(dev) <= SKL_REVID_B0 &&
+ (ring->id == BCS || ring->id == VCS ||
+ ring->id == VECS || ring->id == VCS2))
+ desc |= GEN8_CTX_FORCE_RESTORE;
+
return desc;
}
@@ -286,13 +295,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
- temp = execlists_ctx_descriptor(ctx_obj1);
+ temp = execlists_ctx_descriptor(ring, ctx_obj1);
else
temp = 0;
desc[1] = (u32)(temp >> 32);
desc[0] = (u32)temp;
- temp = execlists_ctx_descriptor(ctx_obj0);
+ temp = execlists_ctx_descriptor(ring, ctx_obj0);
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
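
The ELSP expects the two descriptors as four dwords in a fixed order, upper half first for each context. A minimal sketch of the packing above, with simplified types and the MMIO writes omitted:

/* desc1 fills dwords 1/0, desc0 fills dwords 3/2 (upper dword first). */
static void pack_elsp_dwords(u64 desc0, u64 desc1, u32 out[4])
{
	out[1] = (u32)(desc1 >> 32);
	out[0] = (u32)desc1;
	out[3] = (u32)(desc0 >> 32);
	out[2] = (u32)desc0;
}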
@@ -612,7 +621,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
- * @flags: translated execbuffer call flags.
+ * @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -625,7 +634,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -698,10 +707,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
if (ret)
return ret;
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -776,7 +787,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-/**
+/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance.
*
@@ -785,9 +796,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
+static void
+intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -876,12 +888,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
return ret;
}
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
+ request->ringbuf = ctx->engine[ring->id].ringbuf;
ring->outstanding_lazy_request = request;
return 0;
@@ -1140,11 +1149,22 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
return init_workarounds_ring(ring);
}
+static int gen9_init_render_ring(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = gen8_init_common_ring(ring);
+ if (ret)
+ return ret;
+
+ return init_workarounds_ring(ring);
+}
+
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags)
+ u64 offset, unsigned dispatch_flags)
{
- bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@@ -1316,6 +1336,39 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
return 0;
}
+static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct render_state so;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
+ struct drm_file *file = file_priv ? file_priv->file : NULL;
+ int ret;
+
+ ret = i915_gem_render_state_prepare(ring, &so);
+ if (ret)
+ return ret;
+
+ if (so.rodata == NULL)
+ return 0;
+
+ ret = ring->emit_bb_start(ringbuf,
+ ctx,
+ so.ggtt_offset,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+
+ ret = __i915_add_request(ring, file, so.obj);
+ /* intel_logical_ring_add_request moves object to inactive if it
+ * fails */
+out:
+ i915_gem_render_state_fini(&so);
+ return ret;
+}
+
static int gen8_init_rcs_context(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
@@ -1399,7 +1452,10 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init_hw = gen8_init_render_ring;
+ if (INTEL_INFO(dev)->gen >= 9)
+ ring->init_hw = gen9_init_render_ring;
+ else
+ ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
@@ -1581,37 +1637,47 @@ cleanup_render_ring:
return ret;
}
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static u32
+make_rpcs(struct drm_device *dev)
{
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
- struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
- int ret;
-
- ret = i915_gem_render_state_prepare(ring, &so);
- if (ret)
- return ret;
+ u32 rpcs = 0;
- if (so.rodata == NULL)
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
return 0;
- ret = ring->emit_bb_start(ringbuf,
- ctx,
- so.ggtt_offset,
- I915_DISPATCH_SECURE);
- if (ret)
- goto out;
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (INTEL_INFO(dev)->has_slice_pg) {
+ rpcs |= GEN8_RPCS_S_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->slice_total <<
+ GEN8_RPCS_S_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ if (INTEL_INFO(dev)->has_subslice_pg) {
+ rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ GEN8_RPCS_SS_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- ret = __i915_add_request(ring, file, so.obj);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
-out:
- i915_gem_render_state_fini(&so);
- return ret;
+ if (INTEL_INFO(dev)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MIN_SHIFT;
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MAX_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
}
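
As a worked example of the branches above (counts hypothetical): with slice power gating available and slice_total = 1, the slice branch alone produces

	rpcs = GEN8_RPCS_S_CNT_ENABLE |
	       (1 << GEN8_RPCS_S_CNT_SHIFT) |
	       GEN8_RPCS_ENABLE;

and the subslice and EU branches OR in their count fields the same way, so GEN8_RPCS_ENABLE ends up set whenever any explicit request is made.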
static int
@@ -1659,7 +1725,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
- _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1706,18 +1773,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
- reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
+ reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
+ reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
}
kunmap_atomic(reg_state);
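
Each PDP entry above is one 64-bit page-directory DMA address stored in the context image as an upper/lower dword pair; a minimal sketch of the split (helper name invented):

static void write_pdp_pair(u32 *reg_state, int udw, int ldw, u64 daddr)
{
	reg_state[udw] = upper_32_bits(daddr);
	reg_state[ldw] = lower_32_bits(daddr);
}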
@@ -1925,3 +1992,38 @@ error_unpin_ctx:
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
+
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[ring->id].ringbuf;
+ uint32_t *reg_state;
+ struct page *page;
+
+ if (!ctx_obj)
+ continue;
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ WARN(1, "Failed get_pages for context obj\n");
+ continue;
+ }
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL+1] = 0;
+
+ kunmap_atomic(reg_state);
+
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 6f2d7da594f6..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -30,6 +30,8 @@
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
@@ -40,10 +42,6 @@ int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx);
-void intel_logical_ring_advance_and_submit(
- struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -70,13 +68,13 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
int num_dwords);
/* Logical Ring Contexts */
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx);
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -86,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ u64 exec_start, u32 dispatch_flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 071b96d6e146..5abda1d2c018 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
@@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc;
- if (crtc && crtc->enabled) {
+ if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
@@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -945,6 +946,12 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
+ if (intel_connector_init(&lvds_connector->base) < 0) {
+ kfree(lvds_connector);
+ kfree(lvds_encoder);
+ return;
+ }
+
lvds_encoder->attached_connector = lvds_connector;
intel_encoder = &lvds_encoder->base;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d8de1d5140a7..71e87abdcae7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
- }
+ intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index f93dfc174495..dd92122ed95c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ &i915_ggtt_view_normal);
if (ret != 0)
return ret;
@@ -1065,7 +1066,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1261,7 +1261,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct overlay_registers __iomem *regs;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index d8686ce89160..08532d4ffe0a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 24d77ddcc5f4..fa4ccb346389 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -56,24 +56,42 @@ static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * WaDisableSDEUnitClockGating:skl
- * This seems to be a pre-production w/a.
- */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
- GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+}
- /*
- * WaDisableDgMirrorFixInHalfSliceChicken5:skl
- * This is a pre-production w/a.
- */
- I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
- I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
- ~GEN9_DG_MIRROR_FIX_ENABLE);
+static void skl_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Wa4x4STCOptimizationDisable:skl */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ gen9_init_clock_gating(dev);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0) {
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * WaSetGAPSunitClckGateDisable:skl
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /* WaDisableHDCInvalidation:skl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ I915_READ(FF_SLICE_CS_CHICKEN2) |
+ GEN9_TSG_BARRIER_ACK_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_E0)
+ /* WaDisableLSQCROPERFforOCL:skl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -245,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
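
FW_WM() relies on token pasting: the plane name selects the matching shift and mask macros, so a watermark value is positioned and clamped to its register field in one step. For example:

	FW_WM(wm, SR)
	   -> (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK)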
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_device *dev = dev_priv->dev;
@@ -252,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ if (IS_CHERRYVIEW(dev))
+ chv_set_memory_pm5(dev_priv, enable);
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
} else if (IS_PINEVIEW(dev)) {
@@ -274,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
enable ? "enabled" : "disabled");
}
+
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -290,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
*/
static const int pessimal_latency_ns = 5000;
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static int vlv_get_fifo_size(struct drm_device *dev,
+ enum pipe pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int sprite0_start, sprite1_start, size;
+
+ switch (pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = I915_READ(DSPARB2);
+ dsparb3 = I915_READ(DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ return 0;
+ }
+
+ switch (plane) {
+ case 0:
+ size = sprite0_start;
+ break;
+ case 1:
+ size = sprite1_start - sprite0_start;
+ break;
+ case 2:
+ size = 512 - 1 - sprite1_start;
+ break;
+ default:
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
+ pipe_name(pipe), plane == 0 ? "primary" : "sprite",
+ plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
+ size);
+
+ return size;
+}
+
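
VLV_FIFO_START() stitches a 9-bit FIFO start offset (0..511) together from two registers: eight low bits from one DSPARB register and a single high bit from another. A standalone sketch of the decode:

static int fifo_start(u32 dsparb, u32 dsparb2, int lo_shift, int hi_shift)
{
	return ((dsparb >> lo_shift) & 0xff) |
	       (((dsparb2 >> hi_shift) & 0x1) << 8);
}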
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -535,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int clock;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -547,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
+ reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
@@ -557,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ reg |= FW_WM(wm, CURSOR_SR);
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
@@ -566,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
+ reg |= FW_WM(wm, HPLL_SR);
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
@@ -575,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
@@ -611,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -626,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
+ entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -698,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -712,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -721,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
- int pixel_size,
- int *prec_mult,
- int *drain_latency)
-{
- struct drm_device *dev = crtc->dev;
- int entries;
- int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return false;
+static void vlv_write_wm_values(struct intel_crtc *crtc,
+ const struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return false;
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (IS_CHERRYVIEW(dev))
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
- DRAIN_LATENCY_PRECISION_16;
- else
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
- *drain_latency = (64 * (*prec_mult) * 4) / entries;
+ I915_WRITE(DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ I915_WRITE(DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ I915_WRITE(DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ I915_WRITE(DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ I915_WRITE(DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ } else {
+ I915_WRITE(DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ }
- if (*drain_latency > DRAIN_LATENCY_MASK)
- *drain_latency = DRAIN_LATENCY_MASK;
+ POSTING_READ(DSPFW1);
- return true;
+ dev_priv->wm.vlv = *wm;
}
-/*
- * Update drain latency registers of memory arbiter
- *
- * Valleyview SoC has a new memory arbiter and needs drain latency registers
- * to be programmed. Each plane has a drain latency multiplier and a drain
- * latency value.
- */
+#undef FW_WM_VLV
-static void vlv_update_drain_latency(struct drm_crtc *crtc)
+static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pixel_size;
- int drain_latency;
- enum pipe pipe = intel_crtc->pipe;
- int plane_prec, prec_mult, plane_dl;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ int entries, prec_mult, drain_latency, pixel_size;
+ int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
- (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
+ /*
+ * FIXME the plane might have an fb
+	 * but be invisible (e.g. due to clipping)
+ */
+ if (!intel_crtc->active || !plane->state->fb)
+ return 0;
- if (!intel_crtc_active(crtc)) {
- I915_WRITE(VLV_DDL(pipe), plane_dl);
- return;
- }
+ if (WARN(clock == 0, "Pixel clock is zero!\n"))
+ return 0;
- /* Primary plane Drain Latency */
- pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
- if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_PLANE_PRECISION_HIGH :
- DDL_PLANE_PRECISION_LOW;
- plane_dl |= plane_prec | drain_latency;
- }
+ pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
- /* Cursor Drain Latency
- * BPP is always 4 for cursor
- */
- pixel_size = 4;
+ if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
+ return 0;
- /* Program cursor DL only if it is enabled */
- if (intel_crtc->cursor_base &&
- vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_CURSOR_PRECISION_HIGH :
- DDL_CURSOR_PRECISION_LOW;
- plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+
+ prec_mult = high_precision;
+ drain_latency = 64 * prec_mult * 4 / entries;
+
+ if (drain_latency > DRAIN_LATENCY_MASK) {
+ prec_mult /= 2;
+ drain_latency = 64 * prec_mult * 4 / entries;
}
- I915_WRITE(VLV_DDL(pipe), plane_dl);
-}
+ if (drain_latency > DRAIN_LATENCY_MASK)
+ drain_latency = DRAIN_LATENCY_MASK;
-#define single_plane_enabled(mask) is_power_of_2(mask)
+ return drain_latency | (prec_mult == high_precision ?
+ DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+}
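
Worked numbers for the computation above, with a hypothetical 148.5 MHz pixel clock and 4 bytes per pixel on CHV:

	entries       = DIV_ROUND_UP(148500, 1000) * 4;  /* = 596 */
	drain_latency = 64 * 16 * 4 / 596;               /* = 6 */

6 fits in DRAIN_LATENCY_MASK, so the high-precision multiplier is kept and DDL_PRECISION_HIGH is returned alongside it.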
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static int vlv_compute_wm(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ int fifo_size)
{
- struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
- bool cxsr_enabled;
+ int clock, entries, pixel_size;
- vlv_update_drain_latency(crtc);
+ /*
+ * FIXME the plane might have an fb
+	 * but be invisible (e.g. due to clipping)
+ */
+ if (!crtc->active || !plane->base.state->fb)
+ return 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ clock = crtc->config->base.adjusted_mode.crtc_clock;
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
+ /*
+ * Set up the watermark such that we don't start issuing memory
+ * requests until we are within PND's max deadline value (256us).
+	 * The idea is to stay idle as long as possible while still taking
+	 * advantage of PND's deadline scheduling. The limit of 8
+	 * cachelines (used when the FIFO will anyway drain in less time
+	 * than 256us) should match what we would be doing if trickle
+ * feed were enabled.
+ */
+ return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+}
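
A self-contained sketch of the deadline clamp above, with hypothetical inputs (a 25.2 MHz clock, 2 bytes per pixel, a 511-entry FIFO):

static int example_vlv_wm(int fifo_size, int clock_khz, int cpp)
{
	int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp;   /* 26 * 2 = 52 */
	int deadline = clamp(DIV_ROUND_UP(256 * entries, 64),
			     0, fifo_size - 8);              /* 208 */

	return fifo_size - deadline;                         /* 511 - 208 = 303 */
}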
+
+static bool vlv_compute_sr_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_crtc *crtc;
+ enum pipe pipe = INVALID_PIPE;
+ int num_planes = 0;
+ int fifo_size = 0;
+ struct intel_plane *plane;
+
+ wm->sr.cursor = wm->sr.plane = 0;
+
+ crtc = single_enabled_crtc(dev);
+ /* maxfifo not supported on pipe C */
+ if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
+ pipe = to_intel_crtc(crtc)->pipe;
+ num_planes = !!wm->pipe[pipe].primary +
+ !!wm->pipe[pipe].sprite[0] +
+ !!wm->pipe[pipe].sprite[1];
+ fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
+ if (fifo_size == 0 || num_planes > 1)
+ return false;
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
+ to_intel_plane(crtc->cursor), 0x3f);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (plane->pipe != pipe)
+ continue;
+
+ wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
+ plane, fifo_size);
+ if (wm->sr.plane != 0)
+ break;
+ }
+
+ return true;
}
-static void cherryview_update_wm(struct drm_crtc *crtc)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, planec_wm;
- int cursora_wm, cursorb_wm, cursorc_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- vlv_update_drain_latency(crtc);
+ wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
+ wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->primary),
+ vlv_get_fifo_size(dev, pipe, 0));
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
+ wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->cursor),
+ 0x3f);
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
- if (g4x_compute_wm0(dev, PIPE_C,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planec_wm, &cursorc_wm))
- enabled |= 1 << PIPE_C;
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
- }
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.sr.plane, wm.sr.cursor);
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- planec_wm, cursorc_wm,
- plane_sr, cursor_sr);
+ /*
+ * FIXME DDR DVFS introduces massive memory latencies which
+ * are not known to system agent so any deadline specified
+ * by the display may not be respected. To support DDR DVFS
+ * the watermark code needs to be rewritten to essentially
+ * bypass deadline mechanism and rely solely on the
+ * watermarks. For now disable DDR DVFS.
+ */
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_set_memory_dvfs(dev_priv, false);
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
- I915_WRITE(DSPFW9_CHV,
- (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
- DSPFW_CURSORC_MASK)) |
- (planec_wm << DSPFW_PLANEC_SHIFT) |
- (cursorc_wm << DSPFW_CURSORC_SHIFT));
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -961,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
int sprite = to_intel_plane(plane)->plane;
- int drain_latency;
- int plane_prec;
- int sprite_dl;
- int prec_mult;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
- (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
+ if (enabled) {
+ wm.ddl[pipe].sprite[sprite] =
+ vlv_compute_drain_latency(crtc, plane);
- if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
- &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_SPRITE_PRECISION_HIGH(sprite) :
- DDL_SPRITE_PRECISION_LOW(sprite);
- sprite_dl |= plane_prec |
- (drain_latency << DDL_SPRITE_SHIFT(sprite));
+ wm.pipe[pipe].sprite[sprite] =
+ vlv_compute_wm(intel_crtc,
+ to_intel_plane(plane),
+ vlv_get_fifo_size(dev, pipe, sprite+1));
+ } else {
+ wm.ddl[pipe].sprite[sprite] = 0;
+ wm.pipe[pipe].sprite[sprite] = 0;
}
- I915_WRITE(VLV_DDL(pipe), sprite_dl);
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ sprite_name(pipe, sprite),
+ wm.pipe[pipe].sprite[sprite],
+ wm.sr.plane, wm.sr.cursor);
+
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1027,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc)
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
+ FW_WM(plane_sr, SR) |
+ FW_WM(cursorb_wm, CURSORB) |
+ FW_WM(planeb_wm, PLANEB) |
+ FW_WM(planea_wm, PLANEA));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
+ FW_WM(cursora_wm, CURSORA));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -1062,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1080,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * to_intel_crtc(crtc)->cursor_width;
+ pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1103,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << DSPFW_CURSORB_SHIFT) |
- (8 << DSPFW_PLANEB_SHIFT) |
- (8 << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
- (8 << DSPFW_PLANEC_SHIFT_OLD));
+ I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
+#undef FW_WM
+
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
@@ -1139,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1161,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1184,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->state->fb);
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
@@ -1208,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
+ int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1645,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return 0;
/* The WM are computed with base on how long it takes to fill a single
@@ -1711,6 +1849,8 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
GEN9_MEM_LATENCY_LEVEL_MASK;
/*
+ * WaWmMemoryReadLatency:skl
+ *
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from
* the punit.
@@ -1898,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
- p->cur.bytes_per_pixel = 4;
+
+ if (crtc->primary->state->fb) {
+ p->pri.enabled = true;
+ p->pri.bytes_per_pixel =
+ crtc->primary->state->fb->bits_per_pixel / 8;
+ } else {
+ p->pri.enabled = false;
+ p->pri.bytes_per_pixel = 0;
+ }
+
+ if (crtc->cursor->state->fb) {
+ p->cur.enabled = true;
+ p->cur.bytes_per_pixel = 4;
+ } else {
+ p->cur.enabled = false;
+ p->cur.bytes_per_pixel = 0;
+ }
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
- p->cur.horiz_pixels = intel_crtc->cursor_width;
- /* TODO: for now, assume primary and cursor planes are always enabled. */
- p->pri.enabled = true;
- p->cur.enabled = true;
+ p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2410,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
continue;
if (crtc == for_crtc)
@@ -2443,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
for_each_pipe(dev_priv, pipe) {
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -2498,10 +2649,12 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
+ uint16_t minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
@@ -2520,9 +2673,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks;
+ /* 1. Allocate the mininum required blocks for each active plane */
+ for_each_plane(dev_priv, pipe, plane) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ minimum[plane] = 8;
+ alloc_size -= minimum[plane];
+ }
+
/*
- * Each active plane get a portion of the remaining space, in
- * proportion to the amount of data they need to fetch from memory.
+ * 2. Distribute the remaining space in proportion to the amount of
+ * data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
@@ -2544,8 +2709,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
- plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks = minimum[plane];
+ plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
@@ -2575,7 +2741,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
if (latency == 0)
return UINT_MAX;
- wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
return ret;
@@ -2583,17 +2749,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
- uint32_t latency)
+ uint64_t tiling, uint32_t latency)
{
- uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+ uint32_t ret;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t wm_intermediate_val;
if (latency == 0)
return UINT_MAX;
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+
+ if (tiling == I915_FORMAT_MOD_Y_TILED ||
+ tiling == I915_FORMAT_MOD_Yf_TILED) {
+ plane_bytes_per_line *= 4;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ plane_blocks_per_line /= 4;
+ } else {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ }
+
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
- plane_bytes_per_line;
+ plane_blocks_per_line;
return ret;
}
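
With the /512 moved into skl_wm_method1(), both methods now return results in 512-byte blocks rather than bytes. A worked example for method1 with hypothetical inputs (337.5 MHz pixel rate, 4 bytes per pixel, 4 us latency):

	val = 4 * 337500 * 4 / 512;        /* = 10546 */
	ret = DIV_ROUND_UP(10546, 1000);   /* = 11 blocks */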
@@ -2624,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct drm_plane *plane;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- config->num_pipes_active += intel_crtc_active(crtc);
+ config->num_pipes_active += to_intel_crtc(crtc)->active;
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2642,26 +2820,40 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
+ struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */
- p->active = intel_crtc_active(crtc);
+ p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
- /*
- * For now, assume primary and cursor planes are always enabled.
- */
- p->plane[0].enabled = true;
- p->plane[0].bytes_per_pixel =
- crtc->primary->fb->bits_per_pixel / 8;
+ fb = crtc->primary->state->fb;
+ if (fb) {
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->plane[0].tiling = fb->modifier[0];
+ } else {
+ p->plane[0].enabled = false;
+ p->plane[0].bytes_per_pixel = 0;
+ p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
+ }
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
-
- p->cursor.enabled = true;
- p->cursor.bytes_per_pixel = 4;
- p->cursor.horiz_pixels = intel_crtc->cursor_width ?
- intel_crtc->cursor_width : 64;
+ p->plane[0].rotation = crtc->primary->state->rotation;
+
+ fb = crtc->cursor->state->fb;
+ if (fb) {
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
+ p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
+ } else {
+ p->cursor.enabled = false;
+ p->cursor.bytes_per_pixel = 0;
+ p->cursor.horiz_pixels = 64;
+ p->cursor.vert_pixels = 64;
+ }
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2673,41 +2865,74 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
}
}
-static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation,
- uint32_t mem_value,
+ int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
- uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
- uint32_t result_bytes;
+ uint32_t latency = dev_priv->wm.skl_latency[level];
+ uint32_t method1, method2;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t res_blocks, res_lines;
+ uint32_t selected_result;
- if (mem_value == 0 || !p->active || !p_params->enabled)
+ if (latency == 0 || !p->active || !p_params->enabled)
return false;
method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel,
- mem_value);
+ latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
p_params->bytes_per_pixel,
- mem_value);
+ p_params->tiling,
+ latency);
plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
+ uint32_t min_scanlines = 4;
+ uint32_t y_tile_minimum;
+ if (intel_rotation_90_or_270(p_params->rotation)) {
+ switch (p_params->bytes_per_pixel) {
+ case 1:
+ min_scanlines = 16;
+ break;
+ case 2:
+ min_scanlines = 8;
+ break;
+ case 8:
+ WARN(1, "Unsupported pixel depth for rotation");
+ }
+ }
+ y_tile_minimum = plane_blocks_per_line * min_scanlines;
+ selected_result = max(method2, y_tile_minimum);
+ } else {
+ if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ selected_result = min(method1, method2);
+ else
+ selected_result = method1;
+ }
- /* For now xtile and linear */
- if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
- result_bytes = min(method1, method2);
- else
- result_bytes = method1;
+ res_blocks = selected_result + 1;
+ res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
- res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
- res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+ if (level >= 1 && level <= 7) {
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
+ res_lines += 4;
+ else
+ res_blocks++;
+ }
- if (res_blocks > ddb_allocation || res_lines > 31)
+ if (res_blocks >= ddb_allocation || res_lines > 31)
return false;
*out_blocks = res_blocks;
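To see how the new selection logic behaves, here is a minimal sketch with made-up numbers (an X-tiled plane; none of the values are taken from real hardware):

	/* Hypothetical 3840-pixel-wide, 4 Bpp, X-tiled plane. */
	uint32_t method1 = 42, method2 = 57;	/* skl_wm_method1/2 results */
	uint32_t plane_bytes_per_line = 3840 * 4;
	uint32_t plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);	/* 30 */
	uint16_t ddb_allocation = 160;
	uint32_t selected_result, res_blocks, res_lines;

	/* Not Y-tiled, and the DDB holds at least one line's worth of
	 * blocks, so the smaller of the two methods wins. */
	if ((ddb_allocation / plane_blocks_per_line) >= 1)	/* 160 / 30 == 5 */
		selected_result = min(method1, method2);	/* 42 */
	else
		selected_result = method1;

	res_blocks = selected_result + 1;	/* 43 */
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);	/* 2 */
	/* For levels 1-7 an untiled plane then pays one extra block (44). */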
@@ -2724,30 +2949,31 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
int num_planes,
struct skl_wm_level *result)
{
- uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks;
int i;
for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ result->plane_en[i] = skl_compute_plane_wm(dev_priv,
+ p, &p->plane[i],
ddb_blocks,
- latency,
+ level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
- result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
- latency, &result->cursor_res_b,
+ result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
+ ddb_blocks, level,
+ &result->cursor_res_b,
&result->cursor_res_l);
}
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
return 0;
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
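The factor of 8 suggests the returned linetime is expressed in eighths of a microsecond (with pixel_rate assumed to be in kHz). A quick check with an assumed 1080p timing:

	/* Assumed timing: htotal = 2200 clocks at a 148500 kHz pixel rate. */
	uint32_t pipe_htotal = 2200, pixel_rate = 148500;
	/* 8 * 2200 * 1000 / 148500 rounds up to 119: one scanline takes
	 * ~14.81 us, reported in 0.125 us units. */
	uint32_t linetime = DIV_ROUND_UP(8 * pipe_htotal * 1000, pixel_rate);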
@@ -2921,12 +3147,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
- struct drm_device *dev = dev_priv->dev;
int plane;
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
@@ -3133,12 +3358,21 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
+ intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+ /*
+ * Framebuffer can be NULL on plane disable, but it does not
+ * matter for watermarks if we assume no tiling in that case.
+ */
+ if (fb)
+ intel_plane->wm.tiling = fb->modifier[0];
+ intel_plane->wm.rotation = plane->state->rotation;
skl_update_wm(crtc);
}
@@ -3287,7 +3521,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
hw->dirty[pipe] = true;
@@ -3342,7 +3576,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
- active->pipe_enabled = intel_crtc_active(crtc);
+ active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3456,41 +3690,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
pixel_size, enabled, scaled);
}
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_ggtt_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- return NULL;
-}
-
/**
* Lock protecting IPS related data structures
*/
@@ -3623,7 +3822,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
@@ -3633,9 +3832,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ if (IS_GEN9(dev_priv->dev)) {
+ limits = (dev_priv->rps.max_freq_softlimit) << 23;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ } else {
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
+ }
return limits;
}
@@ -3643,6 +3848,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
@@ -3664,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_freq_softlimit)
+ if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_freq_softlimit)
+ if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3675,59 +3882,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
- I915_WRITE(GEN6_RP_UP_EI, 12500);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+ ei_up = 16000;
+ threshold_up = 95;
/* Downclock if less than 85% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 85;
break;
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
- I915_WRITE(GEN6_RP_UP_EI, 10250);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+ ei_up = 13000;
+ threshold_up = 90;
/* Downclock if less than 75% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 75;
break;
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
- I915_WRITE(GEN6_RP_UP_EI, 8000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+ ei_up = 10000;
+ threshold_up = 85;
/* Downclock if less than 60% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 60;
break;
}
+ I915_WRITE(GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+
+ I915_WRITE(GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
dev_priv->rps.power = new_power;
dev_priv->rps.last_adj = 0;
}
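Replacing the hardcoded tick counts with microsecond inputs makes the policy readable; GT_INTERVAL_FROM_US is assumed here to convert microseconds into GT timestamp ticks for the running platform. For instance, the LOW_POWER case now reduces to the following (the tick rate below is purely illustrative):

	/* With an assumed 12.5 MHz timestamp clock, 1 us == 12.5 ticks. */
	u32 ei_up = 16000, threshold_up = 95;
	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));	/* 200000 ticks */
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up * threshold_up / 100));	/* 190000 ticks */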
@@ -3737,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
u32 mask = 0;
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
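With the EI_EXPIRED bits now folded into their matching directions, the mask only leaves events enabled that can still change the frequency. Traced by hand at the softlimit ceiling (names as in gen6_rps_pm_mask above):

	/* val == max_freq_softlimit and val > min_freq_softlimit: */
	u32 mask = 0;
	mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD |
		GEN6_PM_RP_DOWN_TIMEOUT;	/* downclocking is still possible */
	/* the "val < max" branch is not taken: no up interrupts at the ceiling */
	mask &= dev_priv->pm_rps_events;	/* clip to this platform's events */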
@@ -3750,13 +3950,13 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
/* min/max delay may still have been modified so be sure to
* write the limits value.
@@ -3764,7 +3964,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN9_FREQUENCY(val));
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -3777,7 +3980,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -3786,6 +3989,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
+
+ if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ "Odd GPU freq value\n"))
+ val &= ~1;
+
+ if (val != dev_priv->rps.cur_freq)
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+ dev_priv->rps.cur_freq = val;
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* * If Gfx is Idle, then
@@ -3798,10 +4022,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ u32 val = dev_priv->rps.idle_freq;
/* CHV and latest VLV don't need to force the gfx clock */
if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ valleyview_set_rps(dev_priv->dev, val);
return;
}
@@ -3809,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* When we are idle. Drop to min voltage state.
*/
- if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ if (dev_priv->rps.cur_freq <= val)
return;
/* Mask turbo interrupt so that they will not come in between */
@@ -3818,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, true);
- dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+ dev_priv->rps.cur_freq = val;
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
- dev_priv->rps.min_freq_softlimit);
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
@@ -3829,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, false);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+}
+
+void gen6_rps_busy(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ gen6_rps_reset_ei(dev_priv);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3842,46 +4077,34 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ u32 val;
mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
- else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ val = dev_priv->rps.max_freq_softlimit;
+ if (dev_priv->rps.enabled &&
+ dev_priv->mm.busy &&
+ dev_priv->rps.cur_freq < val) {
+ intel_set_rps(dev_priv->dev, val);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- if (val != dev_priv->rps.cur_freq)
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -3995,6 +4218,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ }
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
@@ -4011,6 +4241,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4035,23 +4267,21 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev);
- I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
- I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+ /* Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+
+ /* 1 second timeout */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(dev_priv, 1000000));
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
- I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
- I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
- I915_WRITE(GEN6_PMINTRMSK, 0x6);
- I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
- gen6_enable_rps_interrupts(dev);
+ /* Rely on the call to gen6_set_rps below to program/set up the
+ * Up/Down EI & threshold registers, as well as the RP_CONTROL,
+ * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
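Note the idiom shared with the other enable paths: rps.power is forced to HIGH_POWER so that the following gen6_set_rps() call observes a power-state change inside gen6_set_rps_thresholds() and programs the EI/threshold, RP_CONTROL, RP_INTERRUPT_LIMITS and RPNSWREQ registers in one pass; gen8_enable_rps() and gen6_enable_rps() below do the same:

	/* Invalidate the cached power state so gen6_set_rps_thresholds()
	 * reprograms everything even if the frequency is unchanged. */
	dev_priv->rps.power = HIGH_POWER;	/* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);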
@@ -4179,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4273,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -4638,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4713,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4904,124 +5138,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
- drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
- dev_priv->ips.renderctx = NULL;
- }
-
- if (dev_priv->ips.pwrctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
- drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
- dev_priv->ips.pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx == NULL)
- dev_priv->ips.renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.renderctx)
- return -ENOMEM;
-
- if (dev_priv->ips.pwrctx == NULL)
- dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- bool was_interruptible;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = ironlake_setup_rc6(dev);
- if (ret)
- return;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = intel_ring_begin(ring, 6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- dev_priv->mm.interruptible = was_interruptible;
- return;
- }
-
- intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- intel_ring_emit(ring, MI_SUSPEND_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_advance(ring);
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_ring_idle(ring);
- dev_priv->mm.interruptible = was_interruptible;
- if (ret) {
- DRM_ERROR("failed to enable ironlake power savings\n");
- ironlake_teardown_rc6(dev);
- return;
- }
-
- I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-
- intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
-}
-
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5534,12 +5650,7 @@ static void gen6_suspend_rps(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev);
}
/**
@@ -5569,7 +5680,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
- ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
@@ -5597,12 +5707,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- /*
- * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
- * added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev);
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
@@ -5619,10 +5724,16 @@ static void intel_gen6_powersave_work(struct work_struct *work)
gen6_enable_rps(dev);
__gen6_update_ring_freq(dev);
}
+
+ WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+
+ WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+
dev_priv->rps.enabled = true;
- if (INTEL_INFO(dev)->gen < 9)
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5633,10 +5744,13 @@ void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev))
+ return;
+
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6169,11 +6283,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -6221,8 +6346,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
@@ -6259,9 +6382,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -6396,7 +6517,8 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->display.init_clock_gating(dev);
+ if (dev_priv->display.init_clock_gating)
+ dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
@@ -6422,7 +6544,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
- dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.init_clock_gating = skl_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -6450,7 +6572,7 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = cherryview_update_wm;
+ dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
@@ -6618,7 +6740,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_gpu_freq(dev_priv, val);
@@ -6628,7 +6752,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_freq_opcode(dev_priv, val);
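With gen9 frequencies stored in 16.66 MHz units, the conversion helpers just scale by GT_FREQUENCY_MULTIPLIER (50) over GEN9_FREQ_SCALER (assumed to be 3, since 50/3 MHz == 16.66 MHz); the two directions round-trip cleanly for multiples of the scaler:

	/* gen9, assuming GT_FREQUENCY_MULTIPLIER == 50, GEN9_FREQ_SCALER == 3: */
	int ratio = 27;			/* hardware ratio in 16.66 MHz units */
	int mhz = ratio * 50 / 3;	/* 450 MHz */
	int back = mhz * 3 / 50;	/* 27 again */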
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b9f40c2e0af7..a8f9348259ae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev)
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
} else {
val = I915_READ(VLV_PSRCTL(pipe));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b3c6dbd467..441e2502b889 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
-{
- int ret;
-
- if (!ring->fbc_dirty)
- return 0;
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
- /* WaFbcNukeOn3DBlt:ivb/hsw */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, value);
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_advance(ring);
-
- ring->fbc_dirty = false;
- return 0;
-}
-
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
return 0;
}
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
- if (ret)
- return ret;
-
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
- return 0;
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -502,6 +469,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
I915_WRITE(HWS_PGA, addr);
}
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ /*
+ * VCS2 doesn't exist on Gen7; this case only silences the
+ * gcc switch check warning
+ */
+ case VCS2:
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+
+ /*
+ * Flush the TLB for this page
+ *
+ * FIXME: These two bits have disappeared on gen8, so a question
+ * arises: do we still need this and if so how should we go about
+ * invalidating the TLB?
+ */
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
+}
+
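The INSTPM write in the function above relies on the driver's masked-register idiom: the upper 16 bits of the write select which of the lower 16 bits actually change, so no read-modify-write is needed. Assuming _MASKED_BIT_ENABLE(a) expands to ((a) << 16 | (a)) and the two INSTPM bits sit at positions 9 and 5 (illustrative values):

	/* INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH == 0x220 under the
	 * assumed bit positions; the masked write becomes 0x02200220,
	 * enabling both bits while leaving every other INSTPM bit alone. */
	u32 value = (1 << 9) | (1 << 5);	/* 0x220 */
	u32 masked = (value << 16) | value;	/* 0x02200220 */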
static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -788,12 +817,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- /* WaForceEnableNonCoherent:bdw */
- /* WaHdcDisableFetchWhenMasked:bdw */
- /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceEnableNonCoherent:bdw */
HDC_FORCE_NON_COHERENT |
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaHdcDisableFetchWhenMasked:bdw */
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
@@ -870,9 +901,132 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
return 0;
}
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* WaDisablePartialInstShootdown:skl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0 ||
+ INTEL_REVID(dev) == SKL_REVID_B0) {
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
+ }
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+ WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN9_RHWO_OPTIMIZATION_DISABLE);
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
+ DISABLE_PIXEL_MASK_CAMMING);
+ }
+
+ if (INTEL_REVID(dev) >= SKL_REVID_C0) {
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /*
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+ }
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /* WaDisablePartialResolveInVc:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+ /* WaCcsTlbPrefetchDisable:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
+
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_workarounds(ring);
+
+ /* WaDisablePowerCompilerClockGating:skl */
+ if (INTEL_REVID(dev) == SKL_REVID_B0)
+ WA_SET_BIT_MASKED(HIZ_CHICKEN,
+ BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+ return skl_tune_iz_hashing(ring);
+}
+
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -888,6 +1042,11 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
+ if (IS_SKYLAKE(dev))
+ return skl_init_workarounds(ring);
+ else if (IS_GEN9(dev))
+ return gen9_init_workarounds(ring);
+
return 0;
}
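To make the subslice arithmetic in skl_tune_iz_hashing() above concrete, suppose the fuses report that in slice 0 only subslice 2 kept all 7 EUs (the readout is invented for illustration):

	u8 fuse = 0x4;			/* subslice_7eu[0]: only bit 2 set */
	/* hweight8(0x4) == 1, so the slice qualifies for tuning. */
	u8 ss = ffs(fuse) - 1;		/* ffs(0x4) == 3, hence ss == 2 */
	u8 val = 3 - ss;		/* IZ hashing value 1 for slice 0 */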
@@ -1386,68 +1545,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
-{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 mmio = 0;
-
- /* The ring status page addresses are no longer next to the rest of
- * the ring registers as of gen7.
- */
- if (IS_GEN7(dev)) {
- switch (ring->id) {
- case RCS:
- mmio = RENDER_HWS_PGA_GEN7;
- break;
- case BCS:
- mmio = BLT_HWS_PGA_GEN7;
- break;
- /*
- * VCS2 actually doesn't exist on Gen7. Only shut up
- * gcc switch check warning
- */
- case VCS2:
- case VCS:
- mmio = BSD_HWS_PGA_GEN7;
- break;
- case VECS:
- mmio = VEBOX_HWS_PGA_GEN7;
- break;
- }
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
- } else {
- /* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
- }
-
- I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
- POSTING_READ(mmio);
-
- /*
- * Flush the TLB for this page
- *
- * FIXME: These two bits have disappeared on gen8, so a question
- * arises: do we still need this and if so how should we go about
- * invalidating the TLB?
- */
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
- u32 reg = RING_INSTPM(ring->mmio_base);
-
- /* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
- I915_WRITE(reg,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
- 1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- ring->name);
- }
-}
-
static int
bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
@@ -1611,7 +1708,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1622,7 +1719,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1635,8 +1733,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
@@ -1654,7 +1752,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- if ((flags & I915_DISPATCH_PINNED) == 0) {
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1686,7 +1784,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1697,7 +1796,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1706,7 +1805,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -2097,6 +2197,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
kref_init(&request->ref);
request->ring = ring;
+ request->ringbuf = ring->buffer;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
@@ -2273,9 +2374,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
- bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = USES_PPGTT(ring->dev) &&
+ !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -2294,8 +2396,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
int ret;
@@ -2305,7 +2407,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ?
+ (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
@@ -2317,7 +2419,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -2327,7 +2429,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -2341,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2350,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2370,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
+ if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
@@ -2379,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (!invalidate && flush) {
- if (IS_GEN7(dev))
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
- else if (IS_BROADWELL(dev))
- dev_priv->fbc.need_sw_cache_clean = true;
- }
-
return 0;
}
@@ -2612,19 +2707,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
/**
- * Initialize the second BSD ring for Broadwell GT3.
- * It is noted that this only exists on Broadwell GT3.
+ * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
- if ((INTEL_INFO(dev)->gen != 8)) {
- DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
- return -EINVAL;
- }
-
ring->name = "bsd2 ring";
ring->id = VCS2;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 714f3fdd57d2..c761fe05ad6f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags);
+ unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags);
+ u64 offset, unsigned dispatch_flags);
/**
* List of objects currently involved in rendering from the
@@ -267,7 +267,6 @@ struct intel_engine_cs {
*/
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
- bool fbc_dirty;
wait_queue_head_t irq_queue;
@@ -373,11 +372,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
*
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
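Since these are dword indices into the 4K status page, moving them keeps the driver out of the newly documented 0x20-0x2f reserved range; with MI_STORE_DWORD_INDEX_SHIFT being 2 (dwords to bytes), the byte offsets work out as:

	/* old seqno slot: 0x20 << 2 == 0x80, inside the reserved dwords */
	/* new seqno slot: 0x30 << 2 == 0xc0 */
	u32 scratch_off = 0x40 << 2;	/* I915_GEM_HWS_SCRATCH_ADDR == 0x100 */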
@@ -425,7 +425,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-void intel_ring_setup_status_page(struct intel_engine_cs *ring);
int init_workarounds_ring(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49695d7d51e3..ce00e6994eeb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
- gen8_irq_power_well_post_enable(dev_priv);
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
+static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So we touch the VGA MSR register here, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (power_well->data == SKL_DISP_PW_2) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+ }
+
+ if (power_well->data == SKL_DISP_PW_1) {
+ intel_prepare_ddi(dev);
+ gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
+ }
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -230,6 +261,141 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
}
}
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
+ SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static void skl_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ uint32_t tmp, fuse_status;
+ uint32_t req_mask, state_mask;
+ bool is_enabled, enable_requested, check_fuse_status = false;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ fuse_status = I915_READ(SKL_FUSE_STATUS);
+
+ switch (power_well->data) {
+ case SKL_DISP_PW_1:
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG0_DIST_STATUS), 1)) {
+ DRM_ERROR("PG0 not enabled\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_2:
+ if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
+ DRM_ERROR("PG1 in disabled state\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_DDI_A_E:
+ case SKL_DISP_PW_DDI_B:
+ case SKL_DISP_PW_DDI_C:
+ case SKL_DISP_PW_DDI_D:
+ case SKL_DISP_PW_MISC_IO:
+ break;
+ default:
+ WARN(1, "Unknown power well %lu\n", power_well->data);
+ return;
+ }
+
+ req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ enable_requested = tmp & req_mask;
+ state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ is_enabled = tmp & state_mask;
+
+ if (enable) {
+ if (!enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
+ }
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ state_mask), 1))
+ DRM_ERROR("%s enable timeout\n",
+ power_well->name);
+ check_fuse_status = true;
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+ }
+ }
+
+ if (check_fuse_status) {
+ if (power_well->data == SKL_DISP_PW_1) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG1_DIST_STATUS), 1))
+ DRM_ERROR("PG1 distributing status timeout\n");
+ } else if (power_well->data == SKL_DISP_PW_2) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG2_DIST_STATUS), 1))
+ DRM_ERROR("PG2 distributing status timeout\n");
+ }
+ }
+
+ if (enable && !is_enabled)
+ skl_power_well_post_enable(dev_priv, power_well);
+}
+
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
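skl_set_power_well() above assumes each well owns a request/state bit pair in HSW_PWR_WELL_DRIVER; with the usual encoding of state in the even bit and request in the odd bit (an assumption here), the helper masks line up with the legacy HSW pair for the top well:

	/* Assumed encoding:
	 *   SKL_POWER_WELL_STATE(pw) -> 1 << ((pw) * 2)
	 *   SKL_POWER_WELL_REQ(pw)   -> 1 << ((pw) * 2 + 1)
	 * so a well with data == 15 would use request bit 31 / state bit 30. */
	u32 req_mask = SKL_POWER_WELL_REQ(power_well->data);
	u32 state_mask = SKL_POWER_WELL_STATE(power_well->data);
	bool enabled = (I915_READ(HSW_PWR_WELL_DRIVER) & (req_mask | state_mask))
		       == (req_mask | state_mask);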
@@ -255,6 +421,36 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_set_power_well(dev_priv, power_well, false);
}
+static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
+ SKL_POWER_WELL_STATE(power_well->data);
+
+ return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
+}
+
+static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /* Clear any request made by BIOS as driver is taking over */
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void skl_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, true);
+}
+
+static void skl_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, false);
+}
+
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -829,6 +1025,13 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops skl_power_well_ops = {
+ .sync_hw = skl_power_well_sync_hw,
+ .enable = skl_power_well_enable,
+ .disable = skl_power_well_disable,
+ .is_enabled = skl_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
@@ -1059,6 +1262,57 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}
+static struct i915_power_well skl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_1,
+ },
+ {
+ .name = "MISC IO power well",
+ .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_MISC_IO,
+ },
+ {
+ .name = "power well 2",
+ .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_2,
+ },
+ {
+ .name = "DDI A/E power well",
+ .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_A_E,
+ },
+ {
+ .name = "DDI B power well",
+ .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C power well",
+ .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D power well",
+ .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_D,
+ },
+};
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -1085,6 +1339,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_SKYLAKE(dev_priv->dev)) {
+ set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1200,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_get - grab an auxilliary power domain reference
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
@@ -1217,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_put - release an auxilliary power domain reference
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
- * This function drops the auxilliary power domain reference obtained by
+ * This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 64ad2b40179f..e87d2f418de4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
switch (crtc->config->pixel_multiplier) {
default:
- WARN(1, "unknown pixel mutlipler specified\n");
+ WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
@@ -2425,6 +2426,22 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
}
}
+static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+{
+ struct intel_sdvo_connector *sdvo_connector;
+
+ sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!sdvo_connector)
+ return NULL;
+
+ if (intel_connector_init(&sdvo_connector->base) < 0) {
+ kfree(sdvo_connector);
+ return NULL;
+ }
+
+ return sdvo_connector;
+}
+
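intel_sdvo_connector_alloc() wraps the allocate-then-initialise idiom: if the second stage fails, the half-built object is freed before returning, so callers only ever see NULL or a fully initialised connector. A generic sketch of the shape (connector_init is a stand-in here, not the driver's intel_connector_init):

	#include <stdlib.h>

	struct connector { int state; };

	static int connector_init(struct connector *c)
	{
		c->state = 1;              /* pretend this step could fail */
		return 0;                  /* 0 on success, negative on error */
	}

	static struct connector *connector_alloc(void)
	{
		struct connector *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;

		if (connector_init(c) < 0) {
			free(c);           /* undo the allocation on init failure */
			return NULL;
		}
		return c;
	}

	int main(void)
	{
		struct connector *c = connector_alloc();

		free(c);
		return 0;
	}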
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
@@ -2436,7 +2453,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2490,7 +2507,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
DRM_DEBUG_KMS("initialising TV type %d\n", type);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2569,7 +2586,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9c5451c97942..a4c0a04b5044 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
if (min <= 0 || max <= 0)
return false;
- if (WARN_ON(drm_vblank_get(dev, pipe)))
+ if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false;
local_irq_disable();
@@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
finish_wait(wq, &wait);
- drm_vblank_put(dev, pipe);
+ drm_crtc_vblank_put(&crtc->base);
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
@@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ unsigned long surf_addr;
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
- plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
- plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
- plane_ctl &= ~PLANE_CTL_TILED_MASK;
- plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
- plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
-
- /* Trickle feed has to be enabled */
- plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
@@ -245,39 +238,57 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
BUG();
}
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
+
if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- plane_ctl |= PLANE_CTL_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
+ if (key->flags) {
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ surf_addr = intel_plane_obj_offset(intel_plane, obj);
+
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
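The rewritten skl_update_plane() no longer derives a stride shift per tiling mode inline; it programs PLANE_STRIDE as fb->pitches[0] divided by a per-modifier alignment from intel_fb_stride_alignment(). The sketch below shows just that arithmetic; the divisor values are assumptions for illustration, not quoted from hardware documentation:

	#include <stdint.h>
	#include <stdio.h>

	enum modifier { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED };

	static uint32_t stride_alignment(enum modifier mod)
	{
		switch (mod) {
		case MOD_LINEAR:  return 64;   /* assumed linear unit, bytes */
		case MOD_X_TILED: return 512;  /* assumed X-tile row width */
		case MOD_Y_TILED: return 128;  /* assumed Y-tile row width */
		}
		return 64;
	}

	int main(void)
	{
		uint32_t pitch = 4096;         /* fb->pitches[0], in bytes */
		uint32_t div = stride_alignment(MOD_X_TILED);

		printf("stride register value: %u\n", pitch / div);
		return 0;
	}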
@@ -290,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- I915_WRITE(PLANE_CTL(pipe, plane),
- I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
/* Activate double buffered register update */
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
- POSTING_READ(PLANE_CTL(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane));
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
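skl_disable_plane() now clears PLANE_CTL and then arms the change by writing PLANE_SURF, leaning on the hardware's double-buffered plane registers: staged values only become live once the surface register is written, at the hardware's next update point. A toy model of that latch, with all register behaviour simulated:

	#include <stdint.h>
	#include <stdio.h>

	struct plane_regs {
		uint32_t ctl_staged, surf_staged;  /* what software wrote */
		uint32_t ctl_live, surf_live;      /* what the pipe scans out */
	};

	static void write_ctl(struct plane_regs *r, uint32_t v)
	{
		r->ctl_staged = v;         /* staged only, nothing visible yet */
	}

	static void write_surf(struct plane_regs *r, uint32_t v)
	{
		r->surf_staged = v;
		/* the SURF write latches the whole staged set into the live set */
		r->ctl_live  = r->ctl_staged;
		r->surf_live = r->surf_staged;
	}

	int main(void)
	{
		struct plane_regs r = { .ctl_live = 1, .surf_live = 0x1000 };

		write_ctl(&r, 0);          /* stage the disable... */
		write_surf(&r, 0);         /* ...and arm it, as skl_disable_plane does */
		printf("live ctl=%u surf=0x%x\n", r.ctl_live, r.surf_live);
		return 0;
	}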
-static int
-skl_update_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
- plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-
- POSTING_READ(PLANE_CTL(pipe, plane));
-
- return 0;
-}
-
-static void
-skl_get_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
- key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
- key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
- case PLANE_CTL_KEY_ENABLE_DESTINATION:
- key->flags = I915_SET_COLORKEY_DESTINATION;
- break;
- case PLANE_CTL_KEY_ENABLE_SOURCE:
- key->flags = I915_SET_COLORKEY_SOURCE;
- break;
- default:
- key->flags = I915_SET_COLORKEY_NONE;
- }
-}
-
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
@@ -399,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -408,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPCNTR(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SP_PIXFORMAT_MASK;
- sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SP_TILED;
- sprctl &= ~SP_ROTATE_180;
+ sprctl = SP_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
@@ -474,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- sprctl |= SP_ENABLE;
-
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
@@ -503,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
@@ -536,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
- ~SP_ENABLE);
+ I915_WRITE(SPCNTR(pipe, plane), 0);
+
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
@@ -546,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
-static int
-vlv_update_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- sprctl &= ~SP_SOURCE_KEY;
- if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SP_SOURCE_KEY;
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
-
- POSTING_READ(SPKEYMSK(pipe, plane));
-
- return 0;
-}
-
-static void
-vlv_get_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
- key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
- key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- if (sprctl & SP_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -609,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPRCTL(pipe));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SPRITE_PIXFORMAT_MASK;
- sprctl &= ~SPRITE_RGB_ORDER_RGBX;
- sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SPRITE_TILED;
- sprctl &= ~SPRITE_ROTATE_180;
+ sprctl = SPRITE_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -660,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl |= SPRITE_ENABLE;
-
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
@@ -698,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPRKEYVAL(pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -739,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
I915_WRITE(SPRSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
-}
-
-static int
-ivb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
- sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
- I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
-
- POSTING_READ(SPRKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-
- if (sprctl & SPRITE_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (sprctl & SPRITE_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -814,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- dvscntr = I915_READ(DVSCNTR(pipe));
-
- /* Mask out pixel format bits in case we change it */
- dvscntr &= ~DVS_PIXFORMAT_MASK;
- dvscntr &= ~DVS_RGB_ORDER_XBGR;
- dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
- dvscntr &= ~DVS_TILED;
- dvscntr &= ~DVS_ROTATE_180;
+ dvscntr = DVS_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -862,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
- dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
@@ -894,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(DVSKEYVAL(pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -922,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
+
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
/**
@@ -993,7 +841,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.plane == intel_crtc->plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
@@ -1006,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static int
-ilk_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
- dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
- I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
-
- POSTING_READ(DVSKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-
- if (dvscntr & DVS_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (dvscntr & DVS_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
- struct drm_intel_sprite_colorkey key;
-
- intel_plane->get_colorkey(&intel_plane->base, &key);
-
- return key.flags != I915_SET_COLORKEY_NONE;
+ return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
}
static int
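With the per-platform update_colorkey/get_colorkey hooks gone, the key lives in intel_plane->ckey and is simply reapplied on the next plane update, so no register readback is needed. A condensed sketch of that cached-state approach (struct and constant names invented):

	#include <stdbool.h>
	#include <stdint.h>

	struct colorkey { uint32_t flags, min_value, max_value; };
	#define COLORKEY_NONE 0u

	struct plane {
		struct colorkey ckey;      /* single point of truth for the key */
	};

	static bool colorkey_enabled(const struct plane *p)
	{
		return p->ckey.flags != COLORKEY_NONE;
	}

	static void set_colorkey(struct plane *p, const struct colorkey *set)
	{
		p->ckey = *set;
		/* a full plane update would reprogram the key registers here */
	}

	int main(void)
	{
		struct plane p = { .ckey = { COLORKEY_NONE } };
		struct colorkey k = { .flags = 1, .max_value = 0xff };

		set_colorkey(&p, &k);
		return colorkey_enabled(&p) ? 0 : 1;
	}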
@@ -1076,7 +866,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1106,16 +895,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return -EINVAL;
}
- /* Sprite planes can be linear or x-tiled surfaces */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- case I915_TILING_X:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported tiling mode\n");
- return -EINVAL;
- }
-
/*
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
@@ -1259,6 +1038,19 @@ finish:
if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
+
+ if (!state->visible) {
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ (1 << drm_plane_index(plane));
+ }
}
return 0;
@@ -1272,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1280,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
- plane->fb = state->base.fb;
- intel_plane->obj = obj;
+ plane->fb = fb;
if (intel_crtc->active) {
intel_crtc->primary_enabled = !state->hides_primary;
@@ -1295,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
- intel_plane->update_plane(plane, crtc, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
@@ -1312,13 +1102,14 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane;
int ret = 0;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
+ if (IS_VALLEYVIEW(dev) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
@@ -1328,34 +1119,15 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}
intel_plane = to_intel_plane(plane);
- ret = intel_plane->update_colorkey(plane, set);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *get = data;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- plane = drm_plane_find(dev, get->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ intel_plane->ckey = *set;
- intel_plane = to_intel_plane(plane);
- intel_plane->get_colorkey(plane, get);
+ /*
+ * The only way this could fail would be due to
+ * the current plane state being unsupportable already,
+	 * and we don't consider that an error for the
+ * colorkey ioctl. So just ignore any error.
+ */
+ intel_plane_restore(plane);
out_unlock:
drm_modeset_unlock_all(dev);
@@ -1364,10 +1136,10 @@ out_unlock:
int intel_plane_restore(struct drm_plane *plane)
{
- if (!plane->crtc || !plane->fb)
+ if (!plane->crtc || !plane->state->fb)
return 0;
- return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
+ return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb,
plane->state->crtc_x, plane->state->crtc_y,
plane->state->crtc_w, plane->state->crtc_h,
plane->state->src_x, plane->state->src_y,
@@ -1448,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
- intel_plane->update_colorkey = ilk_update_colorkey;
- intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
@@ -1473,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (IS_VALLEYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->update_colorkey = vlv_update_colorkey;
- intel_plane->get_colorkey = vlv_get_colorkey;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1497,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
- intel_plane->update_colorkey = skl_update_colorkey;
- intel_plane->get_colorkey = skl_get_colorkey;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 892d23c8479d..8b9d325bda3c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
@@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
@@ -1620,7 +1621,7 @@ intel_tv_init(struct drm_device *dev)
return;
}
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_tv);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4e8fb891d4ea..ab5cc94588e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -23,6 +23,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "i915_vgpu.h"
#include <linux/pm_runtime.h>
@@ -210,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv);
}
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+{
+ u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+
+ return count & GT_FIFO_FREE_ENTRIES_MASK;
+}
+
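fifo_free_entries() factors out the repeated read-and-mask of GTFIFOCTL, and __gen6_gt_wait_for_fifo() polls it in a bounded retry loop. A self-contained sketch of that bounded-poll shape, with a simulated register so it runs anywhere (the mask and reserve values are assumptions):

	#include <stdint.h>
	#include <stdio.h>

	#define FIFO_FREE_ENTRIES_MASK 0x7f   /* assumed field width */
	#define FIFO_RESERVED_ENTRIES  20     /* assumed reserve */

	/* simulated GTFIFOCTL: free entries grow as the hardware drains */
	static uint32_t read_fifoctl(void)
	{
		static uint32_t entries = 4;

		entries += 8;
		return entries;
	}

	static void delay_us(unsigned int us) { (void)us; /* stub */ }

	static uint32_t fifo_free_entries(void)
	{
		return read_fifoctl() & FIFO_FREE_ENTRIES_MASK;
	}

	static int wait_for_fifo(void)
	{
		int loop = 500;
		uint32_t fifo = fifo_free_entries();

		while (fifo <= FIFO_RESERVED_ENTRIES && loop--) {
			delay_us(10);
			fifo = fifo_free_entries();
		}
		return fifo > FIFO_RESERVED_ENTRIES ? 0 : -1;  /* -1: timed out */
	}

	int main(void)
	{
		printf("wait_for_fifo: %d\n", wait_for_fifo());
		return 0;
	}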
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -217,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
if (IS_VALLEYVIEW(dev_priv->dev))
- dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
- u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ u32 fifo = fifo_free_entries(dev_priv);
+
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
- fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ fifo = fifo_free_entries(dev_priv);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
@@ -314,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ fifo_free_entries(dev_priv);
}
if (!restore)
@@ -328,8 +334,9 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
- (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ INTEL_INFO(dev)->gen >= 9) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
@@ -550,18 +557,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug--; /* Only report the first N failures */
}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
- if (i915.mmio_debug)
+ static bool mmio_debug_once = true;
+
+ if (i915.mmio_debug || !mmio_debug_once)
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug = mmio_debug_once--;
}
}
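hsw_unclaimed_reg_detect() now escalates exactly once: the first unclaimed access it sees arms i915.mmio_debug for a single round of verbose reporting, and the static flag prevents it from ever re-arming on its own. The sketch below mirrors that oneshot logic:

	#include <stdbool.h>
	#include <stdio.h>

	static int mmio_debug;                 /* stands in for i915.mmio_debug */

	static void unclaimed_reg_detect(bool unclaimed)
	{
		static bool once = true;

		if (mmio_debug || !once)
			return;                /* already verbose, or already fired */

		if (unclaimed) {
			printf("unclaimed register: enabling oneshot reporting\n");
			mmio_debug = once--;   /* arm one report, then disarm forever */
		}
	}

	int main(void)
	{
		unclaimed_reg_detect(true);    /* arms the oneshot */
		mmio_debug = 0;                /* the one report has been consumed */
		unclaimed_reg_detect(true);    /* silent: oneshot already used */
		return 0;
	}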
@@ -640,6 +653,14 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN6_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
+}
+
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
@@ -703,6 +724,10 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
@@ -724,6 +749,7 @@ __gen6_read(64)
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
+#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
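__vgpu_read() joins the other __*_read() templates: a single macro stamps out 8/16/32/64-bit accessors that differ only in their integer width, and the generator names are #undef'd once the instances exist. A freestanding sketch of the token-pasting technique:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint8_t mmio[4096];             /* fake register file */

	#define DEFINE_READ(x) \
	static uint##x##_t demo_read##x(uint32_t reg) \
	{ \
		uint##x##_t val; \
		memcpy(&val, &mmio[reg], sizeof(val)); /* alignment-safe load */ \
		return val; \
	}

	DEFINE_READ(8)
	DEFINE_READ(16)
	DEFINE_READ(32)

	#undef DEFINE_READ                     /* retire the generator */

	int main(void)
	{
		mmio[0x40] = 0xab;
		printf("read8:  0x%x\n", (unsigned)demo_read8(0x40));
		printf("read16: 0x%x\n", (unsigned)demo_read16(0x40));
		printf("read32: 0x%x\n", (unsigned)demo_read32(0x40));
		return 0;
	}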
@@ -807,6 +833,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
GEN6_WRITE_FOOTER; \
}
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+ off_t reg, u##x val, bool trace) { \
+ GEN6_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
+}
+
static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
@@ -924,12 +958,17 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
+#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -972,6 +1011,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
@@ -1088,6 +1128,8 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_check_vgpu(dev);
+
intel_uncore_ellc_detect(dev);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@@ -1136,6 +1178,11 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
+ if (intel_vgpu_active(dev)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
+ ASSIGN_READ_MMIO_VFUNCS(vgpu);
+ }
+
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
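intel_uncore_init() first picks read/write vfuncs per platform generation and then, when intel_vgpu_active() reports we are running under a hypervisor, overrides both tables with the plain vgpu accessors, since the host side owns forcewake handling. A minimal sketch of that init-time dispatch selection (all names invented):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct mmio_funcs {
		uint32_t (*read32)(uint32_t reg);
	};

	static uint32_t forcewake_read32(uint32_t reg)
	{
		printf("forcewake-wrapped read of 0x%x\n", reg);
		return 0;
	}

	static uint32_t plain_read32(uint32_t reg)
	{
		printf("plain read of 0x%x (hypervisor traps it)\n", reg);
		return 0;
	}

	static struct mmio_funcs funcs;

	static void uncore_init(bool vgpu_active)
	{
		funcs.read32 = forcewake_read32;     /* per-platform default */

		if (vgpu_active)
			funcs.read32 = plain_read32; /* override for virtual GPUs */
	}

	int main(void)
	{
		uncore_init(true);
		funcs.read32(0x1234);
		return 0;
	}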