author     Rob Clark <robdclark@chromium.org>  2022-04-11 14:58:39 -0700
committer  Rob Clark <robdclark@chromium.org>  2022-04-21 15:03:12 -0700
commit     a636a0ff11483a4c158acc31ee7ca083d98a0d13 (patch)
tree       ff1593b0178b87fd101d67faf58adb2c40b71314 /drivers/gpu/drm/msm/msm_gem.c
parent     drm/msm/gem: Add fenced vma unpin (diff)
drm/msm: Add a way for userspace to allocate GPU iova
The motivation at this point is mainly the native userspace Mesa driver in a
VM guest.  The one remaining synchronous "hotpath" is buffer allocation,
because the guest needs to wait to learn the bo's iova before it can start
emitting cmdstream/state that references the new bo.  By allocating the iova
in guest userspace, we no longer need to wait for a response from the host,
but can just rely on the allocation request being processed before the
cmdstream submission.  Allocation failures (OoM, etc) would just be treated
as context-lost (ie. GL_GUILTY_CONTEXT_RESET), or subsequent allocations
(or readpix, etc) can raise GL_OUT_OF_MEMORY.

v2: Fix inuse check
v3: Change mismatched iova case to -EBUSY

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/r/20220411215849.297838-11-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
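For illustration only (not part of this patch): a guest userspace driver
could hand its self-allocated iova to the kernel through the MSM_GEM_INFO
ioctl.  The MSM_INFO_SET_IOVA param name and the drm_msm_gem_info layout are
assumptions based on the rest of this series, and the surrounding allocator
is hypothetical; treat this as a sketch, not the actual Mesa code.

/*
 * Illustration only -- not part of this patch.  Assumes the
 * MSM_INFO_SET_IOVA param and drm_msm_gem_info layout introduced
 * elsewhere in this series.
 */
#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"

static int set_bo_iova(int fd, uint32_t handle, uint64_t iova)
{
        struct drm_msm_gem_info req = {
                .handle = handle,
                .info = MSM_INFO_SET_IOVA,      /* assumed param name */
                .value = iova,                  /* 0 would clear the vma */
        };

        /* The kernel returns -EBUSY if the bo already has a different,
         * in-use iova (see the GEM_WARN_ON path in the diff below).
         */
        return drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
}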
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--   drivers/gpu/drm/msm/msm_gem.c   48
1 file changed, 48 insertions, 0 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 21986c321458..52fe6428a341 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -525,6 +525,54 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
        return ret;
}

+static int clear_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
+{
+       struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+
+       if (!vma)
+               return 0;
+
+       if (msm_gem_vma_inuse(vma))
+               return -EBUSY;
+
+       msm_gem_purge_vma(vma->aspace, vma);
+       msm_gem_close_vma(vma->aspace, vma);
+       del_vma(vma);
+
+       return 0;
+}
+
+/*
+ * Get the requested iova but don't pin it. Fails if the requested iova is
+ * not available. Doesn't need a put because iovas are currently valid for
+ * the life of the object.
+ *
+ * Setting an iova of zero will clear the vma.
+ */
+int msm_gem_set_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t iova)
+{
+       int ret = 0;
+
+       msm_gem_lock(obj);
+       if (!iova) {
+               ret = clear_iova(obj, aspace);
+       } else {
+               struct msm_gem_vma *vma;
+               vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
+               } else if (GEM_WARN_ON(vma->iova != iova)) {
+                       clear_iova(obj, aspace);
+                       ret = -EBUSY;
+               }
+       }
+       msm_gem_unlock(obj);
+
+       return ret;
+}
+
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
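
The helper added above is only the msm_gem.c half; the ioctl plumbing that
calls it lives in msm_drv.c, which is outside this diffstat-limited view.
A rough sketch of how such a caller might route a userspace request into
msm_gem_set_iova(), assuming a per-process address space hangs off the DRM
file's driver_priv (the helper name and checks are illustrative, not taken
verbatim from the patch):

/*
 * Illustrative sketch only -- the real ioctl plumbing is in msm_drv.c and
 * is not shown in this diffstat-limited view.  Helper name and the
 * per-process aspace checks are assumptions, not verbatim patch code.
 */
static int set_iova_from_userspace(struct drm_device *dev,
                struct drm_file *file, struct drm_gem_object *obj,
                uint64_t iova)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;

        if (!priv->gpu)
                return -EINVAL;

        /* Userspace-managed iovas only make sense with a per-process
         * address space; sharing the global GPU aspace would let one
         * process pick addresses that collide with another's mappings.
         */
        if (priv->gpu->aspace == ctx->aspace)
                return -EOPNOTSUPP;

        return msm_gem_set_iova(obj, ctx->aspace, iova);
}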