path: root/drivers/gpu/drm/ttm/ttm_bo.c
author		Christian König <christian.koenig@amd.com>	2017-09-04 20:58:45 +0200
committer	Alex Deucher <alexander.deucher@amd.com>	2017-09-12 14:23:41 -0400
commit		378e2d5b504fe0231c557751e58b80fcf717cc20 (patch)
tree		43adcb89b887991c2837558a469bd6a4c0d0726f /drivers/gpu/drm/ttm/ttm_bo.c
parent		drm/amdgpu: revert "fix deadlock of reservation between cs and gpu reset v2" (diff)
download	linux-dev-378e2d5b504fe0231c557751e58b80fcf717cc20.tar.xz
		linux-dev-378e2d5b504fe0231c557751e58b80fcf717cc20.zip
drm/ttm: fix ttm_bo_cleanup_refs_or_queue once more
With shared reservation objects, __ttm_bo_reserve() can easily fail even on
destroyed BOs. This prevents correct handling when we need to individualize
the reservation object.

Fix this by individualizing the object before even trying to reserve it.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
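For context: ttm_bo_individualize_resv(), the helper this patch reorders
around, copies the fences of the BO's possibly shared reservation object into
the BO-private ttm_resv, so the destruction path no longer depends on other
objects sharing the same lock. A rough sketch of that helper as it looks in
this era of the code; an illustration based on the surrounding kernel
sources, not the verbatim ttm_bo.c:

/* Sketch of the individualization helper; simplified for illustration. */
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	/* Already using the private object, nothing to copy. */
	if (bo->resv == &bo->ttm_resv)
		return 0;

	reservation_object_init(&bo->ttm_resv);
	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	/* Can fail with -ENOMEM, which is why the patched function below
	 * falls back to a timed wait when individualization fails.
	 */
	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}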
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce6296416..bee77d31895b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
+	ret = ttm_bo_individualize_resv(bo);
+	if (ret) {
+		/* Last resort, if we fail to allocate memory for the
+		 * fences block for the BO to become idle
+		 */
+		reservation_object_wait_timeout_rcu(bo->resv, true, false,
+						    30 * HZ);
+		spin_lock(&glob->lru_lock);
+		goto error;
+	}
+
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
-
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, true)) {
+		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
+			if (bo->resv != &bo->ttm_resv)
+				reservation_object_unlock(&bo->ttm_resv);
 			ttm_bo_cleanup_memtype_use(bo);
-
 			return;
 		}
 
-		ret = ttm_bo_individualize_resv(bo);
-		if (ret) {
-			/* Last resort, if we fail to allocate memory for the
-			 * fences block for the BO to become idle and free it.
-			 */
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait(bo, true, true);
-			ttm_bo_cleanup_memtype_use(bo);
-			return;
-		}
 		ttm_bo_flush_all_fences(bo);
 
 		/*
@@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
-		if (bo->resv != &bo->ttm_resv)
-			reservation_object_unlock(&bo->ttm_resv);
 		__ttm_bo_unreserve(bo);
 	}
+	if (bo->resv != &bo->ttm_resv)
+		reservation_object_unlock(&bo->ttm_resv);
 
+error:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
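Read end to end, the cleanup path after this patch looks roughly like the
following. The lines outside the hunks' context (the bdev declaration, the
NO_EVICT/shrinker block, the schedule_delayed_work tail) are not shown in the
diff above and are filled in from the surrounding code of this kernel era, so
treat this as a sketch rather than the verbatim file:

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	/* Individualize first, so a failed reserve on a shared resv
	 * can no longer prevent correct destruction handling.
	 */
	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);
	if (!ret) {
		/* Idle already: release everything immediately. */
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);
			ttm_bo_cleanup_memtype_use(bo);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	/* Not idle yet: queue on ddestroy for delayed destruction. */
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

The point of the reordering is visible here: ttm_bo_individualize_resv() now
runs before __ttm_bo_reserve(), so even when the reserve fails on a shared
reservation object, the BO still reaches the ddestroy list with its fences
already copied into the private ttm_resv.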