Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 111
1 file changed, 58 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 142f89462aa4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -115,36 +115,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- bool need_sync = false;
- int i, r;
+ int i;
for (i = 0; i < p->nrelocs; i++) {
- struct radeon_fence *fence;
+ struct radeon_fence *a, *b;
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- fence = p->relocs[i].robj->tbo.sync_obj;
- if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- need_sync = true;
- }
- }
-
- if (!need_sync) {
- return 0;
- }
-
- r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
- if (r) {
- return r;
+ a = p->relocs[i].robj->tbo.sync_obj;
+ b = p->ib.sync_to[a->ring];
+ p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
}
-
- return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
- sync_to_ring, p->ring);
}
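
Note: the hunk above drops the semaphore bookkeeping from the parser. Instead of collecting a sync_to_ring[] mask and creating a semaphore itself, radeon_cs_sync_rings() now only records, for each ring, the latest fence any reloc BO still depends on in the IB's sync_to[] array; radeon_ib_schedule() takes care of the actual inter-ring synchronization. A minimal sketch of the "later of two fences" selection (fence_later_sketch is a hypothetical stand-in for radeon_fence_later(), assuming the monotonic per-ring seq counter this era of the driver uses):

    /* Hypothetical stand-in for radeon_fence_later(): of two fences on
     * the same ring, return the one that signals later; either may be NULL. */
    static inline struct radeon_fence *fence_later_sketch(struct radeon_fence *a,
                                                          struct radeon_fence *b)
    {
        if (!a)
            return b;
        if (!b)
            return a;
        /* seq numbers on a single ring only grow, so larger seq == later */
        return (a->seq > b->seq) ? a : b;
    }
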
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -294,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+ struct radeon_fence *fence)
+{
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_list *lobj;
+
+ if (parser->chunk_ib_idx == -1) {
+ return;
+ }
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+ return;
+ }
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ struct radeon_bo_va *bo_va;
+ struct radeon_bo *rbo = lobj->bo;
+
+ bo_va = radeon_bo_va(rbo, vm);
+ radeon_fence_unref(&bo_va->fence);
+ bo_va->fence = radeon_fence_ref(fence);
+ }
+}
+
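
Note: the new radeon_bo_vm_fence_va() ties the command stream's fence to every BO's virtual-address mapping, so the VM code can later tell when a mapping was last used by the GPU. A hedged sketch of a consumer (vm_unmap_wait_sketch is hypothetical; this diff only adds the producer side):

    /* Hypothetical consumer: before tearing down a VA mapping, wait for
     * the last CS that used it, then drop the reference taken above. */
    static int vm_unmap_wait_sketch(struct radeon_bo_va *bo_va)
    {
        int r = 0;

        if (bo_va->fence)
            r = radeon_fence_wait(bo_va->fence, false);
        radeon_fence_unref(&bo_va->fence);
        return r;
    }
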
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -306,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error)
+ if (!error) {
+ /* fence all bo va before ttm_eu_fence_buffer_objects() so the BOs are still reserved */
+ radeon_bo_vm_fence_va(parser, parser->ib.fence);
ttm_eu_fence_buffer_objects(&parser->validated,
parser->ib.fence);
- else
+ } else {
ttm_eu_backoff_reservation(&parser->validated);
+ }
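
Note: the ordering in this hunk is load-bearing: ttm_eu_fence_buffer_objects() both fences and unreserves the validated BOs, so the per-VA fences must be attached first, while the BOs are still reserved. Simplified sequence, as in the code above:

    /* Order matters: fence the VAs while the BOs are still reserved,
     * then let TTM fence and unreserve the whole validated list. */
    radeon_bo_vm_fence_va(parser, parser->ib.fence);
    ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence);
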
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
@@ -368,16 +379,13 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
parser->ib.vm_id = 0;
- r = radeon_ib_schedule(rdev, &parser->ib);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
- return 0;
+ return r;
}
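
Note: radeon_ib_schedule() gained a third parameter so an optional constant IB can be submitted together with the main IB (used for SI's CONST_IB below); passing NULL keeps the old single-IB behavior. The hunk also fixes the return value: scheduling errors now propagate to the caller instead of being swallowed by "return 0". Call pattern as used in this diff:

    /* Both call sites in this patch follow the new signature;
     * const_ib may be NULL. */
    r = radeon_ib_schedule(rdev, &parser->ib, NULL);               /* no const IB */
    r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);  /* SI const IB */
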
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
@@ -407,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (parser->chunk_ib_idx == -1)
return 0;
-
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
@@ -459,6 +466,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_bind(rdev, vm);
if (r) {
@@ -468,30 +476,26 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) {
goto out;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
+
+ parser->ib.vm_id = vm->id;
+ /* the ib pool is bound at 0 in the virtual address space,
+ * so gpu_addr is the offset inside the pool bo
+ */
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
parser->const_ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
+ /* the ib pool is bound at 0 in the virtual address space,
+ * so gpu_addr is the offset inside the pool bo
*/
parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
- r = radeon_ib_schedule(rdev, &parser->const_ib);
- if (r)
- goto out;
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ } else {
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
}
- parser->ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
- */
- parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
- parser->ib.is_const_ib = false;
- r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
@@ -499,7 +503,8 @@ out:
}
vm->fence = radeon_fence_ref(parser->ib.fence);
}
- mutex_unlock(&fpriv->vm.mutex);
+ mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
return r;
}
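
Note: the VM path now nests two locks, and the order is fixed: the global rdev->vm_manager.lock is always taken outside the per-client vm->mutex and released in reverse order, so two clients can never deadlock against each other on the shared VM manager. Locking skeleton from the hunks above:

    mutex_lock(&rdev->vm_manager.lock);   /* outer: global VM manager state */
    mutex_lock(&vm->mutex);               /* inner: this client's VM        */
    /* ... bind VM, update page tables, schedule the IB ... */
    mutex_unlock(&vm->mutex);
    mutex_unlock(&rdev->vm_manager.lock);

fpriv->vm.mutex and vm->mutex are the same lock here; naming it through vm just makes the pairing with the new mutex_lock(&vm->mutex) obvious.
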
@@ -519,9 +524,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
+ down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
- radeon_mutex_unlock(&rdev->cs_mutex);
+ up_read(&rdev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
@@ -534,8 +539,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -543,8 +548,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_ib_chunk(rdev, &parser);
@@ -557,8 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
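
Note: replacing cs_mutex with the exclusive_lock rwsem means CS submission is no longer serialized: any number of submitters can hold the read side concurrently, while a writer excludes them all. The hunks above also drop the read side before calling radeon_cs_handle_lockup(), which matters if the lockup handler needs the write side for a GPU reset. A hedged sketch of the intended pairing (the write-side user is assumed, not shown in this diff):

    /* Read side (this ioctl): many CS submitters may run concurrently. */
    down_read(&rdev->exclusive_lock);
    /* ... parse and schedule the command stream ... */
    up_read(&rdev->exclusive_lock);

    /* Assumed write side (e.g. GPU reset): excludes all submitters. */
    down_write(&rdev->exclusive_lock);
    /* ... reinitialize the hardware ... */
    up_write(&rdev->exclusive_lock);
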