Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 309 ++++++++++++++++++-------------
 1 file changed, 174 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e6b9501ab0a..5599c01b265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -27,81 +27,10 @@
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
- u32 ip_instance, u32 ring,
- struct amdgpu_ring **out_ring)
-{
- /* Right now all IPs have only one instance - multiple rings. */
- if (ip_instance != 0) {
- DRM_ERROR("invalid ip instance: %d\n", ip_instance);
- return -EINVAL;
- }
-
- switch (ip_type) {
- default:
- DRM_ERROR("unknown ip type: %d\n", ip_type);
- return -EINVAL;
- case AMDGPU_HW_IP_GFX:
- if (ring < adev->gfx.num_gfx_rings) {
- *out_ring = &adev->gfx.gfx_ring[ring];
- } else {
- DRM_ERROR("only %d gfx rings are supported now\n",
- adev->gfx.num_gfx_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_COMPUTE:
- if (ring < adev->gfx.num_compute_rings) {
- *out_ring = &adev->gfx.compute_ring[ring];
- } else {
- DRM_ERROR("only %d compute rings are supported now\n",
- adev->gfx.num_compute_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_DMA:
- if (ring < adev->sdma.num_instances) {
- *out_ring = &adev->sdma.instance[ring].ring;
- } else {
- DRM_ERROR("only %d SDMA rings are supported\n",
- adev->sdma.num_instances);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD:
- *out_ring = &adev->uvd.ring;
- break;
- case AMDGPU_HW_IP_VCE:
- if (ring < adev->vce.num_rings){
- *out_ring = &adev->vce.ring[ring];
- } else {
- DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- if (ring < adev->uvd.num_enc_rings){
- *out_ring = &adev->uvd.ring_enc[ring];
- } else {
- DRM_ERROR("only %d UVD ENC rings are supported\n",
- adev->uvd.num_enc_rings);
- return -EINVAL;
- }
- break;
- }
-
- if (!(*out_ring && (*out_ring)->adev)) {
- DRM_ERROR("Ring %d is not initialized on IP %d\n",
- ring, ip_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
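The per-IP ring lookup removed above is replaced throughout this patch by amdgpu_queue_mgr_map(), which resolves the userspace-visible (ip_type, ip_instance, ring) triple through the per-context queue manager instead of indexing the device's ring arrays directly. A minimal sketch of the replacement call pattern, with ctx, ip_type, ip_instance and user_ring standing in for the caller's values:

    struct amdgpu_ring *ring;
    int r;

    /* Map the userspace-visible triple to a hardware ring. */
    r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, ip_type,
                             ip_instance, user_ring, &ring);
    if (r)
        return r;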
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
uint32_t *offset)
@@ -135,7 +64,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
return 0;
}
-int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -194,7 +123,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
@@ -226,6 +155,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
break;
default:
@@ -247,7 +178,7 @@ free_all_kdata:
i = p->nchunks - 1;
free_partial_kdata:
for (; i >= 0; i--)
- drm_free_large(p->chunks[i].kdata);
+ kvfree(p->chunks[i].kdata);
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
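The drm_malloc_ab()/drm_free_large() pair is replaced by the generic kvmalloc_array()/kvfree() helpers: the allocation is attempted with kmalloc() and falls back to vmalloc() for large sizes, with overflow-checked multiplication, and kvfree() releases memory from either allocator. A minimal sketch of the pattern, separate from the patch itself:

    /* Overflow-checked array allocation with vmalloc fallback. */
    uint32_t *kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
    if (!kdata)
        return -ENOMEM;
    /* ... copy the user chunk in and parse it ... */
    kvfree(kdata); /* frees kmalloc- or vmalloc-backed memory alike */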
@@ -505,7 +436,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return r;
if (binding_userptr) {
- drm_free_large(lobj->user_pages);
+ kvfree(lobj->user_pages);
lobj->user_pages = NULL;
}
}
@@ -566,12 +497,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
- * invalidated it. Free it an try again
+ * invalidated it. Free it and try again
*/
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
}
@@ -597,12 +528,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_free_pages;
}
- /* Fill the page arrays for all useptrs. */
+ /* Fill the page arrays for all userptrs. */
list_for_each_entry(e, &need_pages, tv.head) {
struct ttm_tt *ttm = e->robj->tbo.ttm;
- e->user_pages = drm_calloc_large(ttm->num_pages,
- sizeof(struct page*));
+ e->user_pages = kvmalloc_array(ttm->num_pages,
+ sizeof(struct page*),
+ GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
r = -ENOMEM;
DRM_ERROR("calloc failure in %s\n", __func__);
@@ -612,7 +544,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
}
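For the userptr page arrays the zeroing variant is used: GFP_KERNEL | __GFP_ZERO reproduces the calloc-style semantics of the removed drm_calloc_large(), so entries that amdgpu_ttm_tt_get_user_pages() has not yet filled stay NULL on the error path. Sketch:

    /* Zeroed allocation; unfilled slots remain NULL. */
    struct page **pages = kvmalloc_array(num_pages,
                                         sizeof(struct page *),
                                         GFP_KERNEL | __GFP_ZERO);
    if (!pages)
        return -ENOMEM;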
@@ -708,7 +640,7 @@ error_free_pages:
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
}
}
@@ -753,6 +685,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
+
+ for (i = 0; i < parser->num_post_dep_syncobjs; i++)
+ drm_syncobj_put(parser->post_dep_syncobjs[i]);
+ kfree(parser->post_dep_syncobjs);
+
dma_fence_put(parser->fence);
if (parser->ctx)
@@ -761,7 +698,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_bo_list_put(parser->bo_list);
for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kvfree(parser->chunks[i].kdata);
kfree(parser->chunks);
if (parser->job)
amdgpu_job_free(parser->job);
@@ -916,9 +853,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return -EINVAL;
}
- r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &ring);
+ r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
+ chunk_ib->ip_instance, chunk_ib->ring, &ring);
if (r)
return r;
@@ -995,62 +931,148 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
+static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- int i, j, r;
-
- for (i = 0; i < p->nchunks; ++i) {
- struct drm_amdgpu_cs_chunk_dep *deps;
- struct amdgpu_cs_chunk *chunk;
- unsigned num_deps;
+ unsigned num_deps;
+ int i, r;
+ struct drm_amdgpu_cs_chunk_dep *deps;
- chunk = &p->chunks[i];
+ deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
- continue;
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct dma_fence *fence;
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_dep);
+ ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
- for (j = 0; j < num_deps; ++j) {
- struct amdgpu_ring *ring;
- struct amdgpu_ctx *ctx;
- struct dma_fence *fence;
+ r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
+ deps[i].ip_type,
+ deps[i].ip_instance,
+ deps[i].ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
- r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
- deps[j].ip_instance,
- deps[j].ring, &ring);
+ fence = amdgpu_ctx_get_fence(ctx, ring,
+ deps[i].handle);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ amdgpu_ctx_put(ctx);
+ return r;
+ } else if (fence) {
+ r = amdgpu_sync_fence(p->adev, &p->job->sync,
+ fence);
+ dma_fence_put(fence);
+ amdgpu_ctx_put(ctx);
if (r)
return r;
+ }
+ }
+ return 0;
+}
- ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
- if (ctx == NULL)
- return -EINVAL;
+static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
+ uint32_t handle)
+{
+ int r;
+ struct dma_fence *fence;
+ r = drm_syncobj_fence_get(p->filp, handle, &fence);
+ if (r)
+ return r;
- fence = amdgpu_ctx_get_fence(ctx, ring,
- deps[j].handle);
- if (IS_ERR(fence)) {
- r = PTR_ERR(fence);
- amdgpu_ctx_put(ctx);
- return r;
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+ dma_fence_put(fence);
- } else if (fence) {
- r = amdgpu_sync_fence(adev, &p->job->sync,
- fence);
- dma_fence_put(fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
- }
+ return r;
+}
+
+static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ unsigned num_deps;
+ int i, r;
+ struct drm_amdgpu_cs_chunk_sem *deps;
+
+ deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+ if (r)
+ return r;
+ }
+ return 0;
+}
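An AMDGPU_CHUNK_ID_SYNCOBJ_IN chunk is an array of drm_amdgpu_cs_chunk_sem entries; for each handle the syncobj's current fence is fetched and merged into the job's sync object, making the submission wait on it. A hypothetical userspace-side sketch of building such a chunk (assumes amdgpu_drm.h; the names sem, chunk and in_syncobj_handle are illustrative, and length_dw counts 32-bit words):

    struct drm_amdgpu_cs_chunk_sem sem = { .handle = in_syncobj_handle };
    struct drm_amdgpu_cs_chunk chunk = {
        .chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_IN,
        .length_dw  = sizeof(sem) / 4,
        .chunk_data = (uintptr_t)&sem,
    };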
+
+static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ unsigned num_deps;
+ int i;
+ struct drm_amdgpu_cs_chunk_sem *deps;
+ deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ p->post_dep_syncobjs = kmalloc_array(num_deps,
+ sizeof(struct drm_syncobj *),
+ GFP_KERNEL);
+ if (!p->post_dep_syncobjs)
+ return -ENOMEM;
+
+ p->num_post_dep_syncobjs = 0;
+
+ for (i = 0; i < num_deps; ++i) {
+ p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_dep_syncobjs[i])
+ return -EINVAL;
+ p->num_post_dep_syncobjs++;
+ }
+ return 0;
+}
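Out-syncobj handles are resolved already at parse time: drm_syncobj_find() takes a reference per handle, and amdgpu_cs_parser_fini() above drops each one via drm_syncobj_put(), so the objects cannot vanish between parsing and fence installation. The reference lifecycle, sketched:

    /* Parse time: take a reference or fail the submission. */
    struct drm_syncobj *obj = drm_syncobj_find(p->filp, handle);
    if (!obj)
        return -EINVAL;
    /* ... submit installs the job fence (see below) ... */
    /* Cleanup: amdgpu_cs_parser_fini() balances the reference. */
    drm_syncobj_put(obj);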
+
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+ struct amdgpu_cs_parser *p)
+{
+ int i, r;
+
+ for (i = 0; i < p->nchunks; ++i) {
+ struct amdgpu_cs_chunk *chunk;
+
+ chunk = &p->chunks[i];
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+ r = amdgpu_cs_process_fence_dep(p, chunk);
+ if (r)
+ return r;
+ } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+ r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
+ if (r)
+ return r;
+ } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+ r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
+ if (r)
+ return r;
}
}
return 0;
}

+static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+{
+ int i;
+
+ for (i = 0; i < p->num_post_dep_syncobjs; ++i)
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+}
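amdgpu_cs_post_dependencies() runs at submission, once the scheduler fence exists, and points every out-syncobj at that fence, so any later lookup of the handle observes this job's fence. Sketch of a consumer using the same drm_syncobj_fence_get() helper as the in-dependency path (filp and out_handle are illustrative):

    struct dma_fence *fence;
    int r = drm_syncobj_fence_get(filp, out_handle, &fence);
    if (r == 0) {
        /* wait on or merge the fence, then drop the reference */
        dma_fence_put(fence);
    }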
+
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
@@ -1071,6 +1093,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+ amdgpu_cs_post_dependencies(p);
+
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1078,13 +1103,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
-
return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
@@ -1092,6 +1117,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
parser.adev = adev;
parser.filp = filp;
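The new guard makes submission fail fast once VRAM contents have been lost, e.g. across a GPU reset; the wait ioctls below gain the same check. Presumably amdgpu_kms_vram_lost() compares a per-file snapshot of the device's VRAM-lost counter against its current value; the guard pattern, sketched:

    if (amdgpu_kms_vram_lost(adev, fpriv))
        return -ENODEV; /* buffer contents may be garbage after reset */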
@@ -1153,21 +1180,28 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
- r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
- wait->in.ring, &ring);
- if (r)
- return r;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
+ r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+ wait->in.ip_type, wait->in.ip_instance,
+ wait->in.ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
if (IS_ERR(fence))
r = PTR_ERR(fence);
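Since the ring mapping now goes through the context's queue manager, the context must be acquired before amdgpu_queue_mgr_map() can run, and every error path after amdgpu_ctx_get() has to drop the context reference. The acquire/map/release-on-error ordering, sketched (ctx_id, ip_type, ip_instance and ring_idx stand in for the ioctl arguments):

    ctx = amdgpu_ctx_get(filp->driver_priv, ctx_id);
    if (ctx == NULL)
        return -EINVAL;

    r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, ip_type,
                             ip_instance, ring_idx, &ring);
    if (r) {
        amdgpu_ctx_put(ctx); /* don't leak the context reference */
        return r;
    }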
@@ -1203,15 +1237,17 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
struct dma_fence *fence;
int r;
- r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
- user->ring, &ring);
- if (r)
- return ERR_PTR(r);
-
ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
if (ctx == NULL)
return ERR_PTR(-EINVAL);
+ r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
+ user->ip_instance, user->ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return ERR_PTR(r);
+ }
+
fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
amdgpu_ctx_put(ctx);
@@ -1332,12 +1368,15 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);