Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_submit.c')
 drivers/gpu/drm/msm/msm_gem_submit.c | 157 +++++++++++++++++++++++-----------
 1 file changed, 100 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index aa5c60a7132d..d04c349d8112 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -42,6 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (!submit)
return NULL;
+ kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
submit->gpu = gpu;
@@ -60,13 +61,19 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return submit;
}
-void msm_gem_submit_free(struct msm_gem_submit *submit)
+void __msm_gem_submit_destroy(struct kref *kref)
{
+ struct msm_gem_submit *submit =
+ container_of(kref, struct msm_gem_submit, ref);
+ unsigned i;
+
dma_fence_put(submit->fence);
- list_del(&submit->node);
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
+ for (i = 0; i < submit->nr_cmds; i++)
+ kfree(submit->cmd[i].relocs);
+
kfree(submit);
}
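With the free path now kref-based, teardown happens only when the last reference drops. The destructor is presumably paired with inline get/put helpers along these lines (msm_gem_submit_put() is called at the end of this patch; the _get() side and its exact placement in msm_gem.h are assumptions):

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}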
@@ -150,13 +157,79 @@ out:
return ret;
}
+static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ size_t sz;
+ int ret = 0;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd submit_cmd;
+ void __user *userptr =
+ u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+
+ ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* validate input from userspace: */
+ switch (submit_cmd.type) {
+ case MSM_SUBMIT_CMD_BUF:
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ break;
+ default:
+ DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ return -EINVAL;
+ }
+
+ if (submit_cmd.size % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].type = submit_cmd.type;
+ submit->cmd[i].size = submit_cmd.size / 4;
+ submit->cmd[i].offset = submit_cmd.submit_offset / 4;
+ submit->cmd[i].idx = submit_cmd.submit_idx;
+ submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+
+ sz = array_size(submit_cmd.nr_relocs,
+ sizeof(struct drm_msm_gem_submit_reloc));
+ /* check for overflow: */
+ if (sz == SIZE_MAX) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ if (!submit->cmd[i].relocs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ userptr = u64_to_user_ptr(submit_cmd.relocs);
+ ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
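The sz == SIZE_MAX check works because array_size() from <linux/overflow.h> saturates on multiplication overflow instead of wrapping. A minimal illustration (the value is contrived):

	/* struct drm_msm_gem_submit_reloc is 24 bytes, so this multiply
	 * overflows size_t; array_size() returns SIZE_MAX rather than a
	 * wrapped-around small value: */
	size_t sz = array_size(SIZE_MAX / 4,
			       sizeof(struct drm_msm_gem_submit_reloc));
	/* sz == SIZE_MAX here, so the caller bails with -ENOMEM instead of
	 * under-allocating and letting copy_from_user() write past the end. */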
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
int i, bool backoff)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
+ msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
dma_resv_unlock(msm_obj->base.resv);
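The switch to the _locked variants reflects that, by this point, the object's dma_resv lock is already held (taken in submit_lock_objects() under the ww acquire ticket), so the helpers must not take it again. A rough sketch of the convention (not the driver's literal code):

	dma_resv_lock(msm_obj->base.resv, &submit->ticket);	/* BO_LOCKED */
	msm_gem_get_and_pin_iova_locked(&msm_obj->base,
			submit->aspace, &iova);			/* BO_PINNED */
	/* ... submit runs ... */
	msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
	dma_resv_unlock(msm_obj->base.resv);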
@@ -259,7 +326,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_and_pin_iova(&msm_obj->base,
+ ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base,
submit->aspace, &iova);
if (ret)
@@ -301,7 +368,7 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
/* process the relocs and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
- uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+ uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
@@ -318,7 +385,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_get_vaddr(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -327,18 +394,11 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
}
for (i = 0; i < nr_relocs; i++) {
- struct drm_msm_gem_submit_reloc submit_reloc;
- void __user *userptr =
- u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
uint32_t off;
uint64_t iova;
bool valid;
- if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
- ret = -EFAULT;
- goto out;
- }
-
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
@@ -376,7 +436,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
}
out:
- msm_gem_put_vaddr(&obj->base);
+ msm_gem_put_vaddr_locked(&obj->base);
return ret;
}
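For context, the elided middle of the loop (unchanged by this patch) resolves each reloc against the target BO and patches the vmap'd cmdstream, roughly:

	ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
	if (ret)
		goto out;

	iova += submit_reloc.reloc_offset;
	if (submit_reloc.shift < 0)
		iova >>= -submit_reloc.shift;
	else
		iova <<= submit_reloc.shift;

	ptr[off] = iova | submit_reloc.or;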
@@ -692,7 +752,20 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = submit_lookup_objects(submit, args, file);
if (ret)
- goto out;
+ goto out_pre_pm;
+
+ ret = submit_lookup_cmds(submit, args, file);
+ if (ret)
+ goto out_pre_pm;
+
+ /*
+ * Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem
+ * in the resume path, we need to rpm get before we lock objs.
+ * Which unfortunately might involve powering up the GPU sooner than
+ * is necessary. But at least in the explicit fencing case, we will
+ * have already done all the fence waiting.
+ */
+ pm_runtime_get_sync(&gpu->pdev->dev);
/* copy_*_user while holding a ww ticket upsets lockdep */
ww_acquire_init(&submit->ticket, &reservation_ww_class);
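The resulting pairing, condensed: every path that reaches the pm_runtime_get_sync() above unwinds through out: (which does the put), while failures before it take out_pre_pm: and skip it. Note that pm_runtime_get_sync() raises the usage count even if resume fails, so the put must be unconditional:

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out_pre_pm;	/* GPU not powered up yet */
	...
	pm_runtime_get_sync(&gpu->pdev->dev);
	...
out:
	pm_runtime_put(&gpu->pdev->dev);	/* balances the get above */
out_pre_pm:
	submit_cleanup(submit);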
@@ -710,60 +783,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
for (i = 0; i < args->nr_cmds; i++) {
- struct drm_msm_gem_submit_cmd submit_cmd;
- void __user *userptr =
- u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
uint64_t iova;
- ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
- /* validate input from userspace: */
- switch (submit_cmd.type) {
- case MSM_SUBMIT_CMD_BUF:
- case MSM_SUBMIT_CMD_IB_TARGET_BUF:
- case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- break;
- default:
- DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
- ret = -EINVAL;
- goto out;
- }
-
- ret = submit_bo(submit, submit_cmd.submit_idx,
+ ret = submit_bo(submit, submit->cmd[i].idx,
&msm_obj, &iova, NULL);
if (ret)
goto out;
- if (submit_cmd.size % 4) {
- DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
+ if (!submit->cmd[i].size ||
+ ((submit->cmd[i].size + submit->cmd[i].offset) >
+ msm_obj->base.size / 4)) {
+ DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
}
- if (!submit_cmd.size ||
- ((submit_cmd.size + submit_cmd.submit_offset) >
- msm_obj->base.size)) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
- ret = -EINVAL;
- goto out;
- }
-
- submit->cmd[i].type = submit_cmd.type;
- submit->cmd[i].size = submit_cmd.size / 4;
- submit->cmd[i].iova = iova + submit_cmd.submit_offset;
- submit->cmd[i].idx = submit_cmd.submit_idx;
+ submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
if (submit->valid)
continue;
- ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
- submit_cmd.nr_relocs, submit_cmd.relocs);
+ ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
+ submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
if (ret)
goto out;
}
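Since submit->cmd[i].size and .offset are now kept in dwords, the byte/dword conversions line up like this (illustrative values):

	/* userspace passes bytes (4-aligned):	kernel stores dwords: */
	submit_cmd.size = 0x120;	 /* -> submit->cmd[i].size   = 0x48 */
	submit_cmd.submit_offset = 0x40; /* -> submit->cmd[i].offset = 0x10 */

	/* the bounds check compares dwords against msm_obj->base.size / 4,
	 * and the iova math converts the offset back to bytes: */
	submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); /* + 0x40 */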
@@ -800,11 +842,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
+ pm_runtime_put(&gpu->pdev->dev);
+out_pre_pm:
submit_cleanup(submit);
if (has_ww_ticket)
ww_acquire_fini(&submit->ticket);
- if (ret)
- msm_gem_submit_free(submit);
+ msm_gem_submit_put(submit);
out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);