author     Christian König <christian.koenig@amd.com>  2021-11-23 11:30:35 +0100
committer  Christian König <christian.koenig@amd.com>  2022-04-07 12:53:54 +0200
commit     8bb31587820a6e04cb613b49238b1800d1a97223 (patch)
tree       bb50afa7a4403e36d5955f48ff44d333ef8e38ce
parent     drm/i915: drop bo->moving dependency (diff)
drm/ttm: remove bo->moving
This is now handled by the DMA-buf framework in the dma_resv obj. Also
remove the workaround inside VMWGFX to update the moving fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-14-christian.koenig@amd.com
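For context: instead of one explicit bo->moving pointer, move synchronization now goes through the KERNEL-usage fences tracked in the buffer's dma_resv object. A minimal sketch of the before/after pattern, where driver_sync_fence() is a hypothetical stand-in for a driver-specific sync helper:

	/* Before: sync against the single explicit move fence. */
	if (bo->moving) {
		r = driver_sync_fence(sync, bo->moving); /* hypothetical helper */
		if (r)
			return r;
	}

	/* After: sync against every kernel-internal fence in the
	 * reservation object; dma_resv_for_each_fence() requires the
	 * resv lock to be held.
	 */
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, bo->base.resv,
				DMA_RESV_USAGE_KERNEL, fence) {
		r = driver_sync_fence(sync, fence); /* hypothetical helper */
		if (r)
			return r;
	}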
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 34
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 2
9 files changed, 39 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5031e26e6716..3dc5ab2764ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
total_size += amdgpu_bo_size(bo);
@@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
}
- ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
- if (ret) {
- pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
- goto validate_map_fail;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ ret = amdgpu_sync_fence(&sync_obj, fence);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
}
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
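The restore path above uses the locked iterator, which assumes the reservation lock is held. Where that lock cannot be held, the unlocked iterator variant with explicit begin/end bracketing is the usual alternative; a sketch:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* The iterator holds a reference on fence for the loop body
		 * and restarts if the reservation object changes underneath.
		 */
		dma_fence_wait(fence, false);
	}
	dma_resv_iter_end(&cursor);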
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5832c05ab10d..ef93abec13b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (unlikely(r))
goto fail_unreserve;
- amdgpu_bo_fence(bo, fence, false);
- dma_fence_put(bo->tbo.moving);
- bo->tbo.moving = dma_fence_get(fence);
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
if (!bp->resv)
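dma_resv_add_fence() fills a fence slot that must have been reserved in advance. A minimal sketch of the reserve-then-add pattern (error handling trimmed, assuming no slot was already reserved elsewhere):

	/* Reserve a slot first; dma_resv_add_fence() itself cannot fail. */
	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r)
		return r;

	dma_resv_add_fence(bo->tbo.base.resv, fence, DMA_RESV_USAGE_KERNEL);
	dma_fence_put(fence);	/* the resv object holds its own reference */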
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e3fbf0f10add..31913ae86de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
- int r;
+ long r;
- if (vmbo->bo.tbo.moving) {
- r = dma_fence_wait(vmbo->bo.tbo.moving, true);
- if (r)
- return r;
- }
+ r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
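The switch from dma_fence_wait() to dma_resv_wait_timeout() is why r changes from int to long: the new call returns a negative error code, zero on timeout, or the remaining timeout on success, so only r < 0 is a failure. A sketch of mapping the result back to a plain error code, where resv stands for the buffer's reservation object:

	long timeout;

	timeout = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
					true, MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;	/* e.g. -ERESTARTSYS when interrupted */

	/* With MAX_SCHEDULE_TIMEOUT the wait cannot expire, so reaching
	 * this point means all kernel fences have signaled. */
	return 0;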
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index dbb551762805..bdb44cee19d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo = &vmbo->bo;
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
+ struct dma_resv_iter cursor;
unsigned int i, ndw, nptes;
+ struct dma_fence *fence;
uint64_t *pte;
int r;
/* Wait for PD/PT moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
- if (r)
- return r;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ r = amdgpu_sync_fence(&p->job->sync, fence);
+ if (r)
+ return r;
+ }
do {
ndw = p->num_dw_left;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 360f980c7e10..015a94f766de 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -418,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
dma_resv_unlock(bo->base.resv);
atomic_dec(&ttm_glob.bo_count);
- dma_fence_put(bo->moving);
bo->destroy(bo);
}
@@ -714,9 +713,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unpin);
/*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as kernel dependency and reserve a new
+ * fence slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -746,9 +744,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
dma_fence_put(fence);
return ret;
}
-
- dma_fence_put(bo->moving);
- bo->moving = fence;
return 0;
}
@@ -951,7 +946,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 99deb45894f4..bc5190340b9c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
- fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -500,9 +499,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
* operation has completed.
*/
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
-
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
@@ -546,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
spin_unlock(&from->move_lock);
ttm_resource_free(bo, &bo->resource);
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
}
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d2..5b324f245265 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- vm_fault_t ret = 0;
- int err = 0;
-
- if (likely(!bo->moving))
- goto out_unlock;
+ long err = 0;
/*
* Quick non-stalling check for idle.
*/
- if (dma_fence_is_signaled(bo->moving))
- goto out_clear;
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+ return 0;
/*
* If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
* is the first attempt.
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
- ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_unlock;
+ return VM_FAULT_RETRY;
ttm_bo_get(bo);
mmap_read_unlock(vmf->vma->vm_mm);
- (void) dma_fence_wait(bo->moving, true);
+ (void)dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- goto out_unlock;
+ return VM_FAULT_RETRY;
}
/*
* Ordinary wait.
*/
- err = dma_fence_wait(bo->moving, true);
- if (unlikely(err != 0)) {
- ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(err < 0)) {
+ return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
- goto out_unlock;
}
-out_clear:
- dma_fence_put(bo->moving);
- bo->moving = NULL;
-
-out_unlock:
- return ret;
+ return 0;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
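The rewritten fault path follows the usual check-then-wait shape: a non-blocking dma_resv_test_signaled() fast path, then, when retries are allowed, the mmap_lock is dropped before the potentially long wait. A condensed, annotated sketch of that retry branch:

	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;	/* caller asked not to block */

		ttm_bo_get(bo);		/* keep the BO alive across the unlock */
		mmap_read_unlock(vmf->vma->vm_mm); /* never wait holding mmap_lock */
		(void)dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
					    true, MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}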
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a84d1d5628d0..a7d62a4eb47b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1161,12 +1161,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
vmw_bo_fence_single(bo, NULL);
- if (bo->moving)
- dma_fence_put(bo->moving);
-
- return dma_resv_get_singleton(bo->base.resv,
- DMA_RESV_USAGE_WRITE,
- &bo->moving);
}
return 0;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c76932b68a33..2d524f8b0802 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -94,7 +94,6 @@ struct ttm_tt;
* @deleted: True if the object is only a zombie and already deleted.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -147,7 +146,6 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- struct dma_fence *moving;
unsigned priority;
unsigned pin_count;