Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c          |  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h          |  6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h    |  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c        |  5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c  |  9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c                 |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ringbuffer.c          |  1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c        | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c                 |  8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c          | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c                     | 54
11 files changed, 47 insertions(+), 118 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 87275f9883ac..43194fbcbc2e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -160,7 +160,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd5706f16..20754c15412a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -159,12 +159,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return READ_ONCE(obj->active_count);
-}
-
-static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->framebuffer_references);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 18bf4f8d6d80..34b51fad02de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -154,7 +154,6 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- unsigned int active_count;
/** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1bbc690494c7..d99f1a600b96 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -229,8 +229,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
- (i915_gem_object_is_active(obj) ||
- i915_gem_object_is_framebuffer(obj)))
+ (i915_gem_object_is_framebuffer(obj) ||
+ !reservation_object_test_signaled_rcu(obj->base.resv,
+ true)))
continue;
if (!(shrink & I915_SHRINK_BOUND) &&
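
[The shrinker change above swaps the obj->active_count test for asking the reservation object directly whether every fence has signalled. A minimal stand-alone sketch of that idleness test follows; the toy_* types are illustrative stand-ins, not the kernel's fence or reservation API.]

#include <stdbool.h>
#include <stddef.h>

struct toy_fence {
	bool signaled;
};

struct toy_resv {
	struct toy_fence *excl;    /* exclusive (write) fence, may be NULL */
	struct toy_fence **shared; /* shared (read) fences */
	size_t shared_count;
};

/*
 * Roughly mirrors reservation_object_test_signaled_rcu(resv, true):
 * with test_all set, the object only counts as idle once the exclusive
 * fence and every shared fence have signalled.
 */
static bool toy_resv_test_signaled(const struct toy_resv *resv, bool test_all)
{
	size_t i;

	if (resv->excl && !resv->excl->signaled)
		return false;

	if (test_all) {
		for (i = 0; i < resv->shared_count; i++)
			if (!resv->shared[i]->signaled)
				return false;
	}

	return true;
}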
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 2812f7fa27fe..24a3c677ccd5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -475,15 +475,6 @@ static int igt_mmap_offset_exhaustion(void *arg)
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
}
-
- /* NB we rely on the _active_ reference to access obj now */
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = create_mmap_offset(obj);
- if (err) {
- pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
- loop, err);
- goto out;
- }
}
out:
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b3e0e25c5d80..ad7638da785d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1486,9 +1486,7 @@ static void execlists_submit_request(struct i915_request *request)
static void __execlists_context_fini(struct intel_context *ce)
{
intel_ring_put(ce->ring);
-
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void execlists_context_destroy(struct kref *kref)
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index d65b8cba1a8f..c9337e4b5ee0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1327,7 +1327,6 @@ void intel_ring_free(struct kref *ref)
static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0dc3896e49f5..3ceb397c8645 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -131,35 +131,29 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
+ void *vaddr;
u32 *batch;
int err;
- h->gt = engine->gt;
-
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
+ vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
- h->obj = obj;
- h->batch = vaddr;
- }
+ h->obj = obj;
+ h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62cf34db9280..eeecdad0e3ca 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -75,11 +75,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_global ? 'p' : ' ';
@@ -144,9 +139,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
unsigned int frontbuffer_bits;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 25a3e4d09a2f..b17f23991253 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -94,34 +94,26 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list = &pool->cache_list[n];
list_for_each_entry(obj, list, batch_pool_link) {
+ struct reservation_object *resv = obj->base.resv;
+
/* The batches are strictly LRU ordered */
- if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->base.resv;
-
- if (!reservation_object_test_signaled_rcu(resv, true))
- break;
-
- i915_retire_requests(pool->engine->i915);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
-
- /*
- * The object is now idle, clear the array of shared
- * fences before we add a new request. Although, we
- * remain on the same engine, we may be on a different
- * timeline and so may continually grow the array,
- * trapping a reference to all the old fences, rather
- * than replace the existing fence.
- */
- if (rcu_access_pointer(resv->fence)) {
- reservation_object_lock(resv, NULL);
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (!reservation_object_test_signaled_rcu(resv, true))
+ break;
+
+ /*
+ * The object is now idle, clear the array of shared
+ * fences before we add a new request. Although, we
+ * remain on the same engine, we may be on a different
+ * timeline and so may continually grow the array,
+ * trapping a reference to all the old fences, rather
+ * than replace the existing fence.
+ */
+ if (rcu_access_pointer(resv->fence)) {
+ reservation_object_lock(resv, NULL);
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
}
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
- true));
-
if (obj->base.size >= size)
goto found;
}
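
[The rewritten scan leans on the invariant stated in the loop comment: the cache list is strictly LRU ordered, so the first still-busy entry terminates the walk because everything behind it was used more recently. A self-contained sketch of that early-exit pattern, with toy types in place of the i915 structures:]

#include <stdbool.h>
#include <stddef.h>

struct pool_entry {
	struct pool_entry *next; /* least-recently used first */
	bool busy;               /* i.e. its fences have not yet signalled */
	size_t size;
};

/*
 * Return the first idle entry large enough for the request, or NULL.
 * Because entries are kept in strict LRU order, a busy entry ends the
 * scan early: every later entry must be busy too.
 */
static struct pool_entry *pool_find(struct pool_entry *head, size_t size)
{
	struct pool_entry *e;

	for (e = head; e; e = e->next) {
		if (e->busy)
			break;
		if (e->size >= size)
			return e;
	}

	return NULL;
}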
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 503f1180af12..c13b86e6ef1f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -78,43 +78,11 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
#endif
-static void obj_bump_mru(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- obj->mm.dirty = true; /* be paranoid */
-}
-
static void __i915_vma_retire(struct i915_active *ref)
{
struct i915_vma *vma = container_of(ref, typeof(*vma), active);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->base.resv)) {
- if (reservation_object_test_signaled_rcu(obj->base.resv, true))
- reservation_object_add_excl_fence(obj->base.resv, NULL);
- reservation_object_unlock(obj->base.resv);
- }
- /*
- * Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (i915_gem_object_is_shrinkable(obj))
- obj_bump_mru(obj);
-
- i915_gem_object_put(obj); /* and drop the active reference */
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -922,6 +890,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
+ int err;
assert_vma_held(vma);
assert_object_held(obj);
@@ -935,17 +904,13 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!vma->active.count && !obj->active_count++)
- i915_gem_object_get(obj); /* once more for the active ref */
-
- if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count && !--obj->active_count)
- i915_gem_object_put(obj);
- return -ENOMEM;
- }
+ if (i915_active_acquire(&vma->active))
+ i915_vma_get(vma);
- GEM_BUG_ON(!i915_vma_is_active(vma));
- GEM_BUG_ON(!obj->active_count);
+ err = i915_active_ref(&vma->active, rq->fence.context, rq);
+ i915_active_release(&vma->active);
+ if (unlikely(err))
+ return err;
obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
@@ -957,11 +922,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
obj->read_domains = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
__i915_active_request_set(&vma->last_fence, rq);
export_fence(vma, rq, flags);
+
+ GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
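
[The net effect of the i915_vma.c changes is that the vma, not the object, now carries the activity reference: the first request to activate a vma takes an extra reference on it via i915_vma_get(), and __i915_vma_retire() drops it once the last request completes. A minimal userspace sketch of that lifetime pattern, collapsing i915_active's acquire/ref/release machinery into a plain counter; the toy_* names are stand-ins, not the i915 API.]

#include <assert.h>
#include <stdlib.h>

struct toy_vma {
	int refcount;     /* lifetime references, cf. i915_vma_get/put */
	int active_count; /* in-flight requests, cf. i915_active */
};

static void toy_vma_get(struct toy_vma *vma)
{
	vma->refcount++;
}

static void toy_vma_put(struct toy_vma *vma)
{
	if (--vma->refcount == 0)
		free(vma);
}

/* cf. i915_active_acquire(): nonzero only on the 0 -> 1 transition */
static int toy_active_acquire(struct toy_vma *vma)
{
	return vma->active_count++ == 0;
}

/* cf. __i915_vma_retire(): the last completing request drops the
 * reference taken at first activation */
static void toy_active_retire(struct toy_vma *vma)
{
	assert(vma->active_count > 0);
	if (--vma->active_count == 0)
		toy_vma_put(vma);
}

/* cf. i915_vma_move_to_active() */
static void toy_move_to_active(struct toy_vma *vma)
{
	if (toy_active_acquire(vma))
		toy_vma_get(vma); /* keep the vma alive until retirement */
}

/* usage: activate twice, retire twice; the vma survives until the
 * caller's final reference is dropped */
int main(void)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));

	vma->refcount = 1;       /* caller's reference */
	toy_move_to_active(vma); /* 0 -> 1 active: takes the extra ref */
	toy_move_to_active(vma); /* already active: no extra ref */
	toy_active_retire(vma);
	toy_active_retire(vma);  /* 1 -> 0 active: drops the extra ref */
	toy_vma_put(vma);        /* caller drops theirs: freed here */
	return 0;
}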