author    Chris Wilson <chris@chris-wilson.co.uk>    2020-05-28 09:24:27 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2020-05-28 16:55:15 +0100
commit    bffa18dd0bca90112746bafd333386c71fe55efe (patch)
tree      ca4dd36ead9e5dc0f5d9e4506e23249940260811 /drivers/gpu/drm/i915
parent    drm/i915/gt: Restore both GGTT bindings on resume (diff)
drm/i915/gt: Remove local entries from GGTT on suspend
Across suspend/resume, we clear the entire GGTT and rebuild from scratch. In particular, we want to only preserve the global entries for use by the HW, and delay reinstating the local binds until required by the user. This means that we can evict any local binds in the global GTT, saving any time in preserving their state, as they will be rebound on demand.

References: https://gitlab.freedesktop.org/drm/intel/-/issues/1947
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200528082427.21402-2-chris@chris-wilson.co.uk
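For readability, this is roughly what i915_ggtt_suspend() looks like with the patch applied, reconstructed from the intel_ggtt.c hunk below. It is a sketch, not the authoritative tree contents; blank lines and indentation may differ slightly from the committed file.

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma, *vn;
        int open;

        mutex_lock(&ggtt->vm.mutex);

        /* Skip rewriting PTE on VMA unbind. */
        open = atomic_xchg(&ggtt->vm.open, 0);

        list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                i915_vma_wait_for_bind(vma);

                if (i915_vma_is_pinned(vma))
                        continue;

                /* Only local (non-global) binds are evicted here; they
                 * are rebound on demand after resume. */
                if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
                        __i915_vma_evict(vma);
                        drm_mm_remove_node(&vma->node);
                }
        }

        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
        ggtt->invalidate(ggtt);
        atomic_set(&ggtt->vm.open, open);

        mutex_unlock(&ggtt->vm.mutex);

        intel_gt_check_and_clear_faults(ggtt->vm.gt);
}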
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c      | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h      |  1
3 files changed, 54 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ffe285b0b3bd..323c328d444a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma;
+ struct i915_vma *vma, *vn;
+ int open;
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ __i915_vma_evict(vma);
+ drm_mm_remove_node(&vma->node);
+ }
+ }
+
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
+ atomic_set(&ggtt->vm.open, open);
+
+ mutex_unlock(&ggtt->vm.mutex);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 22198b758459..9b30ddc49e4b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1229,31 +1229,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
{
- int ret;
-
- lockdep_assert_held(&vma->vm->mutex);
-
- if (i915_vma_is_pinned(vma)) {
- vma_print_allocator(vma, "is pinned");
- return -EAGAIN;
- }
-
- /*
- * After confirming that no one else is pinning this vma, wait for
- * any laggards who may have crept in during the wait (through
- * a residual pin skipping the vm->mutex) to complete.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
GEM_BUG_ON(i915_vma_is_pinned(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
@@ -1292,6 +1270,33 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+ int ret;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ __i915_vma_evict(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
@@ -1303,13 +1308,13 @@ int i915_vma_unbind(struct i915_vma *vma)
intel_wakeref_t wakeref = 0;
int err;
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
- goto out_rpm;
+ return err;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8ad1daabcd58..d0d01f909548 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -203,6 +203,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
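For reference, a sketch of the refactored unbind path after this patch, reconstructed from the i915_vma.c hunks above: __i915_vma_unbind() keeps the allocation and pin checks, the i915_vma_sync() wait and the drm_mm node removal, while the teardown proper is lifted into the new __i915_vma_evict() so that i915_ggtt_suspend() can call it directly under vm->mutex. The body of __i915_vma_evict() is not shown in full in the diff and is therefore elided here.

int __i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->mutex);

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        /*
         * After confirming that no one else is pinning this vma, wait for
         * any laggards who may have crept in during the wait (through
         * a residual pin skipping the vm->mutex) to complete.
         */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        GEM_BUG_ON(i915_vma_is_active(vma));

        /* The teardown itself (detaching the vma, vma_unbind_pages(), etc.)
         * now lives in __i915_vma_evict(), shared with i915_ggtt_suspend(). */
        __i915_vma_evict(vma);
        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

        return 0;
}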