author	Dave Airlie <airlied@redhat.com>	2013-02-08 12:10:18 +1000
committer	Dave Airlie <airlied@redhat.com>	2013-02-08 12:10:18 +1000
commit	6dc1c49da6dd3bf020a66b2a135b9625ac01c2c7 (patch)
tree	38a6c5d4896de01449e9d224088ae223161fcd3c	/drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent	Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next (diff)
parent	Revert "Revert "console: implement lockdep support for console_lock"" (diff)
Merge branch 'fbcon-locking-fixes' of ssh://people.freedesktop.org/~airlied/linux into drm-next
This pulls in most of Linus' tree up to -rc6; it fixes the worst lockdep-reported issues and re-enables fbcon lockdep. (not the fbcon maintainer)

* 'fbcon-locking-fixes' of ssh://people.freedesktop.org/~airlied/linux: (529 commits)
  Revert "Revert "console: implement lockdep support for console_lock""
  fbcon: fix locking harder
  fb: Yet another band-aid for fixing lockdep mess
  fb: rework locking to fix lock ordering on takeover
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	21
1 file changed, 21 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 27269103b621..2f2daebd0eef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -615,6 +615,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	total = 0;
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;
 
 		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
 
@@ -625,6 +627,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (copy_to_user(&user_relocs[j].presumed_offset,
+					 &invalid_offset,
+					 sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
 		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}