path: root/drivers/gpu/drm/msm/msm_ringbuffer.c
author     Dave Airlie <airlied@redhat.com>    2021-07-30 16:05:58 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-07-30 16:24:01 +1000
commit     f1b7996551a40a4ebb551130c83077a0cabcb935 (patch)
tree       5f4341566519775e2d6a3a6c0c3654e5b2e57982 /drivers/gpu/drm/msm/msm_ringbuffer.c
parent     Merge tag 'drm-misc-next-2021-07-29' of git://anongit.freedesktop.org/drm/drm-misc into drm-next (diff)
parent     drm/msm/gem: Mark active before pinning (diff)
download   linux-dev-f1b7996551a40a4ebb551130c83077a0cabcb935.tar.xz
           linux-dev-f1b7996551a40a4ebb551130c83077a0cabcb935.zip
Merge tag 'drm-msm-next-2021-07-28' of https://gitlab.freedesktop.org/drm/msm into drm-next
An early pull for v5.15 (there'll be more coming in a week or two), consisting of the drm/scheduler conversion and a couple of other small series that one was based on.

Mostly sending this now because IIUC danvet wanted it in drm-next so he could rebase on it. (Daniel, if you disagree then speak up, and I'll instead include this in the main pull request once that is ready.)

This also has a core patch to drop drm_gem_object_put_locked() now that the last use of it is removed.

[airlied: add NULL to drm_sched_init]
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGumRk7H88bqV=H9Fb1SM0zPBo5B7NsCU3jFFKBYxf5k+Q@mail.gmail.com
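The diff below is the backend half of that drm/scheduler conversion: msm_ringbuffer.c now registers msm_sched_ops and creates one drm_gpu_scheduler per ring. For orientation only, the submit side of such a conversion (handled elsewhere in the series, in msm_gem_submit.c) pairs with it roughly as sketched here. This is an illustrative sketch against the v5.15-era drm_sched API, not code from this pull; example_queue_job() is a made-up helper name, and both drm_sched helpers have changed signature in later kernels.

    /*
     * Illustrative sketch only (not code from this pull): the submit-side
     * half of a drm_sched conversion, using the ~v5.15-era API.
     */
    #include <drm/gpu_scheduler.h>

    static int example_queue_job(struct drm_sched_job *job,
                                 struct drm_sched_entity *entity,
                                 void *owner)
    {
            int ret;

            /* Attach the job to a scheduler entity; this also creates the
             * scheduler fence for the job. */
            ret = drm_sched_job_init(job, entity, owner);
            if (ret)
                    return ret;

            /*
             * Hand the job to the scheduler.  Once the fences returned by
             * the backend's .dependency hook (msm_job_dependency() in the
             * diff below) have signalled, the scheduler calls .run_job
             * (msm_job_run()) to write the submit into the ringbuffer.
             */
            drm_sched_entity_push_job(job, entity);

            return 0;
    }

The point of the split is that .dependency, .run_job and .free_job are the only driver-specific pieces; job ordering, throttling against num_hw_submissions and fence bookkeeping move into drm_sched.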
Diffstat (limited to 'drivers/gpu/drm/msm/msm_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c  69
1 file changed, 66 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4d2a2a4abef8..bd54c1412649 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -7,10 +7,61 @@
 #include "msm_ringbuffer.h"
 #include "msm_gpu.h"
 
+static uint num_hw_submissions = 8;
+MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
+module_param(num_hw_submissions, uint, 0600);
+
+static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
+                struct drm_sched_entity *s_entity)
+{
+        struct msm_gem_submit *submit = to_msm_submit(job);
+
+        if (!xa_empty(&submit->deps))
+                return xa_erase(&submit->deps, submit->last_dep++);
+
+        return NULL;
+}
+
+static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+{
+        struct msm_gem_submit *submit = to_msm_submit(job);
+        struct msm_gpu *gpu = submit->gpu;
+
+        submit->hw_fence = msm_fence_alloc(submit->ring->fctx);
+
+        pm_runtime_get_sync(&gpu->pdev->dev);
+
+        /* TODO move submit path over to using a per-ring lock.. */
+        mutex_lock(&gpu->dev->struct_mutex);
+
+        msm_gpu_submit(gpu, submit);
+
+        mutex_unlock(&gpu->dev->struct_mutex);
+
+        pm_runtime_put(&gpu->pdev->dev);
+
+        return dma_fence_get(submit->hw_fence);
+}
+
+static void msm_job_free(struct drm_sched_job *job)
+{
+        struct msm_gem_submit *submit = to_msm_submit(job);
+
+        drm_sched_job_cleanup(job);
+        msm_gem_submit_put(submit);
+}
+
+const struct drm_sched_backend_ops msm_sched_ops = {
+        .dependency = msm_job_dependency,
+        .run_job = msm_job_run,
+        .free_job = msm_job_free
+};
+
 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
                 void *memptrs, uint64_t memptrs_iova)
 {
         struct msm_ringbuffer *ring;
+        long sched_timeout;
         char name[32];
         int ret;
 
@@ -32,7 +83,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 
         if (IS_ERR(ring->start)) {
                 ret = PTR_ERR(ring->start);
-                ring->start = 0;
+                ring->start = NULL;
                 goto fail;
         }
 
@@ -45,13 +96,23 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
         ring->memptrs = memptrs;
         ring->memptrs_iova = memptrs_iova;
 
+        /* currently managing hangcheck ourselves: */
+        sched_timeout = MAX_SCHEDULE_TIMEOUT;
+
+        ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+                        num_hw_submissions, 0, sched_timeout,
+                        NULL, NULL, to_msm_bo(ring->bo)->name);
+        if (ret) {
+                goto fail;
+        }
+
         INIT_LIST_HEAD(&ring->submits);
         spin_lock_init(&ring->submit_lock);
         spin_lock_init(&ring->preempt_lock);
 
         snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
 
-        ring->fctx = msm_fence_context_alloc(gpu->dev, name);
+        ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
 
         return ring;
 
@@ -65,9 +126,11 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
         if (IS_ERR_OR_NULL(ring))
                 return;
 
+        drm_sched_fini(&ring->sched);
+
         msm_fence_context_free(ring->fctx);
 
-        msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+        msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
 
         kfree(ring);
 }