Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 64
1 file changed, 40 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d6..72a3df32292f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
- * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
@@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
+ case 8:
+ ret = GEN8_CXT_TOTAL_SIZE;
+ break;
default:
BUG();
}
@@ -129,6 +132,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
@@ -147,6 +151,7 @@ create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +171,7 @@ create_hw_context(struct drm_device *dev,
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
+ list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
@@ -178,6 +184,10 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
ctx->id = ret;
+ /* NB: Mark all slices as needing a remap so that when the context first
+ * loads it will restore whatever remap state already exists. If there
+ * is no remap info, it will be a NOP. */
+ ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
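For illustration, a standalone userspace sketch (plain C, not driver code; the slice count is an assumed stand-in for NUM_L3_SLICES(dev)) of what the remap_slice mask initialised above contains:

#include <assert.h>

int main(void)
{
        const unsigned int num_l3_slices = 2;   /* assumed slice count */
        unsigned int remap_slice = (1u << num_l3_slices) - 1;

        /* every slice starts out marked as needing a remap */
        assert(remap_slice == 0x3);
        return 0;
}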
@@ -213,7 +223,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +235,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
goto err_unpin;
}
+ dev_priv->ring[RCS].default_context = ctx;
+
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
@@ -281,16 +292,24 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
- i915_gem_object_unpin(dctx->obj);
-
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
- drm_gem_object_unreference(&dctx->obj->base);
+ WARN_ON(!dev_priv->ring[RCS].last_context);
+ if (dev_priv->ring[RCS].last_context == dctx) {
+ /* Fake switch to NULL context */
+ WARN_ON(dctx->obj->active);
+ i915_gem_object_unpin(dctx->obj);
+ i915_gem_context_unreference(dctx);
+ }
+
+ i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].default_context = NULL;
+ dev_priv->ring[RCS].last_context = NULL;
}
static int context_idr_cleanup(int id, void *p, void *data)
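The reference accounting described in the comment above can be hard to follow, so here is a rough userspace model of it (fake_ctx, ctx_get() and ctx_put() are simplified stand-ins, not the driver's kref API): creation leaves one reference, do_switch() adds one for ring->last_context, and teardown has to drop both.

#include <assert.h>
#include <stdlib.h>

struct fake_ctx {
        int refcount;
};

static struct fake_ctx *ctx_create(void)
{
        struct fake_ctx *ctx = calloc(1, sizeof(*ctx));

        if (ctx)
                ctx->refcount = 1;      /* +1 from object creation */
        return ctx;
}

static void ctx_get(struct fake_ctx *ctx)
{
        ctx->refcount++;
}

static void ctx_put(struct fake_ctx *ctx)
{
        if (--ctx->refcount == 0)
                free(ctx);
}

int main(void)
{
        struct fake_ctx *dctx = ctx_create();   /* default context */
        struct fake_ctx *last_context;

        if (!dctx)
                return 1;

        ctx_get(dctx);          /* +1 from do_switch(), held as last_context */
        last_context = dctx;
        assert(dctx->refcount == 2);

        /* teardown, mirroring the new fini path above */
        if (last_context == dctx) {
                ctx_put(dctx);  /* drop the do_switch()/last_context reference */
                last_context = NULL;
        }
        ctx_put(dctx);          /* drop the creation reference; context is freed */
        return 0;
}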
@@ -393,11 +412,11 @@ static int do_switch(struct i915_hw_context *to)
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
- int ret;
+ int ret, i;
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
- if (from == to)
+ if (from == to && !to->remap_slice)
return 0;
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
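With the changed early-return above, a switch to the already-current context is no longer skipped while L3 remaps are still pending. A tiny standalone sketch of that condition (plain C, made-up values):

#include <stdio.h>

int main(void)
{
        /* (from == to, remap_slice) -> is the switch skipped? */
        struct { int same_ctx; unsigned int remap_slice; } cases[] = {
                { 1, 0x0 },     /* same context, nothing pending: skip  */
                { 1, 0x2 },     /* same context, remap pending: do work */
                { 0, 0x0 },     /* different context: always do work    */
        };
        int i;

        for (i = 0; i < 3; i++) {
                int skip = cases[i].same_ctx && !cases[i].remap_slice;

                printf("same=%d remap=0x%x -> %s\n", cases[i].same_ctx,
                       cases[i].remap_slice, skip ? "skip" : "switch");
        }
        return 0;
}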
@@ -420,8 +439,6 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
- else if (WARN_ON_ONCE(from == to)) /* not yet expected */
- hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
@@ -429,6 +446,18 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(ring, i);
+ /* If it failed, try again next round */
+ if (ret)
+ DRM_DEBUG_DRIVER("L3 remapping failed\n");
+ else
+ to->remap_slice &= ~(1<<i);
+ }
+
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
@@ -436,11 +465,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
- i915_gem_object_move_to_active(from->obj, ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -451,17 +477,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
- ret = i915_add_request(ring, NULL);
- if (ret) {
- /* Too late, we've already scheduled a context switch.
- * Try to undo the change so that the hw state is
- * consistent with out tracking. In case of emergency,
- * scream.
- */
- WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
- return ret;
- }
-
+ /* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
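The hunk above drops the explicit i915_add_request() call and instead relies on the old context object being kept alive by its active reference until a later request retires it. A rough userspace model of that idea (obj, move_to_active() and retire() are simplified stand-ins, not the GEM API):

#include <assert.h>
#include <stdlib.h>

struct obj {
        int refcount;
        int active;
};

static void obj_get(struct obj *o)
{
        o->refcount++;
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);
}

/* putting an object on the active list takes a reference on it... */
static void move_to_active(struct obj *o)
{
        if (!o->active) {
                o->active = 1;
                obj_get(o);
        }
}

/* ...and retiring the request that covers it drops that reference */
static void retire(struct obj *o)
{
        if (o->active) {
                o->active = 0;
                obj_put(o);
        }
}

int main(void)
{
        struct obj *from = calloc(1, sizeof(*from));

        if (!from)
                return 1;
        from->refcount = 1;     /* the context's own reference */

        move_to_active(from);   /* as in do_switch() above */
        obj_put(from);          /* the old context is unreferenced */
        assert(from->active);   /* ...but kept alive by the active ref */

        retire(from);           /* a later request retirement frees it */
        return 0;
}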