path: root/drivers/gpu/drm/msm/msm_gem.c
author		Rob Clark <robdclark@chromium.org>	2021-07-27 18:06:14 -0700
committer	Rob Clark <robdclark@chromium.org>	2021-07-28 09:19:00 -0700
commit		1d8a5ca436ee4a28eec14cb813f790f7cdeeee19 (patch)
tree		4e7ef645394d0de1d8fe506ed776e24fc7198611 /drivers/gpu/drm/msm/msm_gem.c
parent		drm/msm: Return ERR_PTR() from submit_create() (diff)
drm/msm: Conversion to drm scheduler
For existing adrenos, there are one or more ringbuffers, depending on whether preemption is supported. When preemption is supported, each ringbuffer has its own priority. A submitqueue (which maps to a gl context or vk queue in userspace) is mapped to a specific ringbuffer at creation time, based on the submitqueue's priority.

Each ringbuffer has its own drm_gpu_scheduler. Each submitqueue maps to a drm_sched_entity. And each submit maps to a drm_sched_job.

Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/4
Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210728010632.2633470-10-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
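To make that mapping concrete, the sketch below shows how a per-queue scheduler entity can be bound to the scheduler owned by one ringbuffer, using the v5.15-era drm_sched_entity_init() signature. The example_* structures and field names are illustrative stand-ins, not the actual msm structures touched by this patch.

#include <drm/gpu_scheduler.h>

/*
 * Illustrative sketch only: example_ring and example_submitqueue stand in
 * for the driver's ringbuffer and submitqueue structures, assumed here to
 * embed the scheduler objects directly.
 */
struct example_ring {
	struct drm_gpu_scheduler sched;		/* one scheduler per ringbuffer */
};

struct example_submitqueue {
	struct drm_sched_entity entity;		/* one entity per submitqueue */
};

/*
 * Bind a submitqueue's entity to the scheduler of the ringbuffer chosen
 * for its priority; each submit on this queue then becomes a
 * drm_sched_job pushed to this entity.
 */
static int example_submitqueue_init(struct example_submitqueue *queue,
				    struct example_ring *ring)
{
	struct drm_gpu_scheduler *sched = &ring->sched;

	/* v5.15-era arguments: entity, priority, sched_list, count, guilty */
	return drm_sched_entity_init(&queue->entity,
				     DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

Because jobs pushed to a single entity complete in order, submits within one submitqueue stay FIFO on their ringbuffer while the scheduler arbitrates between queues.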
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	35
1 file changed, 0 insertions, 35 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9be4e0a8ebb7..3ab5c8620893 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -804,41 +804,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive)
-{
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	fobj = dma_resv_shared_list(obj->resv);
-	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_excl_fence(obj->resv);
-		/* don't need to wait on our own fences, since ring is fifo */
-		if (fence && (fence->context != fctx->context)) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						dma_resv_held(obj->resv));
-		if (fence->context != fctx->context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);