Diffstat
-rw-r--r-- | drivers/gpu/drm/scheduler/gpu_scheduler.c (renamed from drivers/gpu/drm/amd/scheduler/gpu_scheduler.c) | 396
1 file changed, 213 insertions, 183 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 92ec663fdada..2c18996d59c5 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -19,32 +19,36 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- *
  */
+
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <uapi/linux/sched/types.h>
 #include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
 
 #define CREATE_TRACE_POINTS
-#include "gpu_sched_trace.h"
+#include <drm/gpu_scheduler_trace.h>
+
+#define to_drm_sched_job(sched_job)		\
+		container_of((sched_job), struct drm_sched_job, queue_node)
 
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
 /* Initialize a given run queue struct */
-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+static void drm_sched_rq_init(struct drm_sched_rq *rq)
 {
 	spin_lock_init(&rq->lock);
 	INIT_LIST_HEAD(&rq->entities);
 	rq->current_entity = NULL;
 }
 
-static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
-				    struct amd_sched_entity *entity)
+static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+				    struct drm_sched_entity *entity)
 {
 	if (!list_empty(&entity->list))
 		return;
@@ -53,8 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
 	spin_unlock(&rq->lock);
 }
 
-static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
-				       struct amd_sched_entity *entity)
+static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+				       struct drm_sched_entity *entity)
 {
 	if (list_empty(&entity->list))
 		return;
@@ -72,17 +76,17 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
  *
  * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_entity *
-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 {
-	struct amd_sched_entity *entity;
+	struct drm_sched_entity *entity;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			if (amd_sched_entity_is_ready(entity)) {
+			if (drm_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
 				return entity;
@@ -92,7 +96,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		if (amd_sched_entity_is_ready(entity)) {
+		if (drm_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
 			return entity;
@@ -111,39 +115,37 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
  *
  * Init a context entity used by the scheduler when submitting to the HW ring.
  *
  * @sched	The pointer to the scheduler
- * @entity	The pointer to a valid amd_sched_entity
+ * @entity	The pointer to a valid drm_sched_entity
  * @rq		The run queue this entity belongs to
  * @kernel	If this is an entity for the kernel
  * @jobs	The max number of jobs in the job queue
  *
  * return 0 on success, a negative error code on failure.
  */
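For drivers porting to the renamed API, entity setup changes in two visible ways in the hunks above: the header moves to <drm/gpu_scheduler.h>, and drm_sched_entity_init() grows a trailing atomic_t *guilty argument. A minimal sketch under those assumptions; my_gpu_ctx, the chosen priority level and the queue size 32 are illustrative only:

struct my_gpu_ctx {
	struct drm_sched_entity entity;
	atomic_t guilty;		/* shared with the scheduler */
};

static int my_gpu_ctx_init(struct drm_gpu_scheduler *sched,
			   struct my_gpu_ctx *ctx)
{
	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* Passing &ctx->guilty lets the scheduler flag this context after
	 * repeated hangs; passing NULL opts out of guilty tracking. */
	return drm_sched_entity_init(sched, &ctx->entity, rq, 32, &ctx->guilty);
}

Note that jobs no longer sizes a kfifo allocation; with the spsc_queue conversion below, the only failure mode left is the -EINVAL argument check.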
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
-			  struct amd_sched_entity *entity,
-			  struct amd_sched_rq *rq,
-			  uint32_t jobs)
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+			  struct drm_sched_entity *entity,
+			  struct drm_sched_rq *rq,
+			  uint32_t jobs, atomic_t *guilty)
 {
-	int r;
-
 	if (!(sched && entity && rq))
 		return -EINVAL;
 
-	memset(entity, 0, sizeof(struct amd_sched_entity));
+	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq;
 	entity->sched = sched;
+	entity->guilty = guilty;
 
 	spin_lock_init(&entity->rq_lock);
 	spin_lock_init(&entity->queue_lock);
-	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
-	if (r)
-		return r;
+	spsc_queue_init(&entity->job_queue);
 
 	atomic_set(&entity->fence_seq, 0);
 	entity->fence_context = dma_fence_context_alloc(2);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
  * Query if entity is initialized
@@ -153,8 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
  *
  * return true if entity is initialized, false otherwise
  */
-static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
-					    struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+					    struct drm_sched_entity *entity)
 {
 	return entity->sched == sched &&
 		entity->rq != NULL;
@@ -167,10 +169,10 @@ static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
  *
  * Return true if the entity doesn't have any unscheduled jobs.
  */
-static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
 	rmb();
-	if (kfifo_is_empty(&entity->job_queue))
+	if (spsc_queue_peek(&entity->job_queue) == NULL)
 		return true;
 
 	return false;
@@ -183,9 +185,9 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
  *
  * Return true if entity could provide a job.
  */
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 {
-	if (kfifo_is_empty(&entity->job_queue))
+	if (spsc_queue_peek(&entity->job_queue) == NULL)
 		return false;
 
 	if (READ_ONCE(entity->dependency))
@@ -202,12 +204,12 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
  *
  * Cleanup and free the allocated resources.
  */
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
-			   struct amd_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity)
 {
 	int r;
 
-	if (!amd_sched_entity_is_initialized(sched, entity))
+	if (!drm_sched_entity_is_initialized(sched, entity))
 		return;
 	/**
	 * The client will not queue more IBs during this fini, consume existing
@@ -217,48 +219,55 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 		r = -ERESTARTSYS;
 	else
 		r = wait_event_killable(sched->job_scheduled,
-					amd_sched_entity_is_idle(entity));
-	amd_sched_entity_set_rq(entity, NULL);
+					drm_sched_entity_is_idle(entity));
+	drm_sched_entity_set_rq(entity, NULL);
 	if (r) {
-		struct amd_sched_job *job;
+		struct drm_sched_job *job;
 
 		/* Park the kernel for a moment to make sure it isn't processing
		 * our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
-		while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
-			struct amd_sched_fence *s_fence = job->s_fence;
-			amd_sched_fence_scheduled(s_fence);
+		if (entity->dependency) {
+			dma_fence_remove_callback(entity->dependency,
+						  &entity->cb);
+			dma_fence_put(entity->dependency);
+			entity->dependency = NULL;
+		}
+
+		while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+			struct drm_sched_fence *s_fence = job->s_fence;
+			drm_sched_fence_scheduled(s_fence);
 			dma_fence_set_error(&s_fence->finished, -ESRCH);
-			amd_sched_fence_finished(s_fence);
+			drm_sched_fence_finished(s_fence);
+			WARN_ON(s_fence->parent);
 			dma_fence_put(&s_fence->finished);
 			sched->ops->free_job(job);
 		}
-	}
-	kfifo_free(&entity->job_queue);
 }
+EXPORT_SYMBOL(drm_sched_entity_fini);
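On the teardown side, drm_sched_entity_fini() either waits (killably) for the entity to drain or, when the wait is interrupted by SIGKILL, flushes the remaining jobs with their finished fences set to -ESRCH. A sketch of the matching driver call, continuing the hypothetical my_gpu_ctx from above:

static void my_gpu_ctx_fini(struct drm_gpu_scheduler *sched,
			    struct my_gpu_ctx *ctx)
{
	/* The caller must guarantee no more jobs get pushed to the entity;
	 * anything still queued is completed with -ESRCH rather than run. */
	drm_sched_entity_fini(sched, &ctx->entity);
}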
-static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct amd_sched_entity *entity =
-		container_of(cb, struct amd_sched_entity, cb);
+	struct drm_sched_entity *entity =
+		container_of(cb, struct drm_sched_entity, cb);
 	entity->dependency = NULL;
 	dma_fence_put(f);
-	amd_sched_wakeup(entity->sched);
+	drm_sched_wakeup(entity->sched);
 }
 
-static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct amd_sched_entity *entity =
-		container_of(cb, struct amd_sched_entity, cb);
+	struct drm_sched_entity *entity =
+		container_of(cb, struct drm_sched_entity, cb);
 	entity->dependency = NULL;
 	dma_fence_put(f);
 }
 
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
-			     struct amd_sched_rq *rq)
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+			     struct drm_sched_rq *rq)
 {
 	if (entity->rq == rq)
 		return;
@@ -266,37 +275,39 @@ void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
 	spin_lock(&entity->rq_lock);
 
 	if (entity->rq)
-		amd_sched_rq_remove_entity(entity->rq, entity);
+		drm_sched_rq_remove_entity(entity->rq, entity);
 
 	entity->rq = rq;
 	if (rq)
-		amd_sched_rq_add_entity(rq, entity);
+		drm_sched_rq_add_entity(rq, entity);
 
 	spin_unlock(&entity->rq_lock);
 }
+EXPORT_SYMBOL(drm_sched_entity_set_rq);
 
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
-				    struct amd_sched_entity *entity)
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+				    struct drm_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->sched;
-	struct amd_sched_fence *s_fence;
+	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_sched_fence *s_fence;
 
 	if (!fence || dma_fence_is_signaled(fence))
 		return false;
 	if (fence->context == entity->fence_context)
 		return true;
-	s_fence = to_amd_sched_fence(fence);
+	s_fence = to_drm_sched_fence(fence);
 	if (s_fence && s_fence->sched == sched)
 		return true;
 
 	return false;
 }
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
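drm_sched_dependency_optimized() is now exported as well; a driver can consult it to skip an explicit pipeline sync when the fence comes from the same fence context or will be ordered by the same scheduler anyway. Illustrative use, with the my_gpu_* wrapper being a hypothetical stand-in for driver logic:

static bool my_gpu_need_pipeline_sync(struct dma_fence *fence,
				      struct my_gpu_ctx *ctx)
{
	/* No hardware-level sync is needed when the scheduler already
	 * orders this fence against our jobs. */
	return fence && !drm_sched_dependency_optimized(fence, &ctx->entity);
}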
-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->sched;
 	struct dma_fence * fence = entity->dependency;
-	struct amd_sched_fence *s_fence;
+	struct drm_sched_fence *s_fence;
 
 	if (fence->context == entity->fence_context) {
 		/* We can ignore fences from ourself */
@@ -304,7 +315,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		return false;
 	}
 
-	s_fence = to_amd_sched_fence(fence);
+	s_fence = to_drm_sched_fence(fence);
 	if (s_fence && s_fence->sched == sched) {
 
 		/*
@@ -315,7 +326,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		dma_fence_put(entity->dependency);
 		entity->dependency = fence;
 		if (!dma_fence_add_callback(fence, &entity->cb,
-					    amd_sched_entity_clear_dep))
+					    drm_sched_entity_clear_dep))
 			return true;
 
 		/* Ignore it when it is already scheduled */
@@ -324,48 +335,52 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 	}
 
 	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
-				    amd_sched_entity_wakeup))
+				    drm_sched_entity_wakeup))
 		return true;
 
 	dma_fence_put(entity->dependency);
 	return false;
 }
 
-static struct amd_sched_job *
-amd_sched_entity_peek_job(struct amd_sched_entity *entity)
+static struct drm_sched_job *
+drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->sched;
-	struct amd_sched_job *sched_job;
+	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_sched_job *sched_job = to_drm_sched_job(
+					spsc_queue_peek(&entity->job_queue));
 
-	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
+	if (!sched_job)
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job)))
-		if (amd_sched_entity_add_dependency_cb(entity))
+	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
+		if (drm_sched_entity_add_dependency_cb(entity))
 			return NULL;
 
+	/* skip jobs from an entity that was marked guilty */
+	if (entity->guilty && atomic_read(entity->guilty))
+		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+	spsc_queue_pop(&entity->job_queue);
 	return sched_job;
 }
 
 /**
- * Helper to submit a job to the job queue
+ * Submit a job to the job queue
  *
  * @sched_job	The pointer to job required to submit
  *
- * Returns true if we could submit the job.
+ * Queues the job and wakes up the scheduler if this is the entity's first job.
 */
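drm_sched_entity_pop_job() drives the backend dependency hook in a loop, parking the entity on each unsignaled fence the hook returns; the hook now also receives the entity. A sketch of an implementation, where my_gpu_job and my_gpu_deps_get_fence() are hypothetical driver bookkeeping:

struct my_gpu_job {
	struct drm_sched_job base;
	struct my_gpu_deps deps;	/* hypothetical dependency tracker */
};

static struct dma_fence *
my_gpu_job_dependency(struct drm_sched_job *sched_job,
		      struct drm_sched_entity *entity)
{
	struct my_gpu_job *job = container_of(sched_job, struct my_gpu_job, base);

	/* Hand back the next fence the job still needs to wait for; the
	 * scheduler keeps calling until this returns NULL, and only then
	 * pops the job off the entity's spsc queue. */
	return my_gpu_deps_get_fence(&job->deps);
}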
-static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+			       struct drm_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = sched_job->sched;
-	struct amd_sched_entity *entity = sched_job->s_entity;
-	bool added, first = false;
+	struct drm_gpu_scheduler *sched = sched_job->sched;
+	bool first = false;
 
-	spin_lock(&entity->queue_lock);
-	added = kfifo_in(&entity->job_queue, &sched_job,
-			 sizeof(sched_job)) == sizeof(sched_job);
+	trace_drm_sched_job(sched_job, entity);
 
-	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
-		first = true;
+	spin_lock(&entity->queue_lock);
+	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	spin_unlock(&entity->queue_lock);
@@ -373,26 +388,26 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 	if (first) {
 		/* Add the entity to the run queue */
 		spin_lock(&entity->rq_lock);
-		amd_sched_rq_add_entity(entity->rq, entity);
+		drm_sched_rq_add_entity(entity->rq, entity);
 		spin_unlock(&entity->rq_lock);
-		amd_sched_wakeup(sched);
+		drm_sched_wakeup(sched);
 	}
-
-	return added;
 }
+EXPORT_SYMBOL(drm_sched_entity_push_job);
 
 /* job_finish is called after the hw fence is signaled */
-static void amd_sched_job_finish(struct work_struct *work)
+static void drm_sched_job_finish(struct work_struct *work)
 {
-	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
 						   finish_work);
-	struct amd_gpu_scheduler *sched = s_job->sched;
+	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	/* remove job from ring_mirror_list */
 	spin_lock(&sched->job_list_lock);
 	list_del_init(&s_job->node);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		struct amd_sched_job *next;
+		struct drm_sched_job *next;
 
 		spin_unlock(&sched->job_list_lock);
 		cancel_delayed_work_sync(&s_job->work_tdr);
@@ -400,7 +415,7 @@ static void amd_sched_job_finish(struct work_struct *work)
 
 		/* queue TDR for next job */
 		next = list_first_entry_or_null(&sched->ring_mirror_list,
-						struct amd_sched_job, node);
+						struct drm_sched_job, node);
 
 		if (next)
 			schedule_delayed_work(&next->work_tdr, sched->timeout);
@@ -410,41 +425,43 @@ static void amd_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct dma_fence *f,
+static void drm_sched_job_finish_cb(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
 {
-	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
 						 finish_cb);
 	schedule_work(&job->finish_work);
 }
 
-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
-	struct amd_gpu_scheduler *sched = s_job->sched;
+	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
-			       amd_sched_job_finish_cb);
+			       drm_sched_job_finish_cb);
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    list_first_entry_or_null(&sched->ring_mirror_list,
-				     struct amd_sched_job, node) == s_job)
+				     struct drm_sched_job, node) == s_job)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 	spin_unlock(&sched->job_list_lock);
 }
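drm_sched_job_finish() runs from finish_work once the hardware fence has signaled: it unlinks the job from ring_mirror_list, re-arms the TDR timer for the next job, and only then calls the backend's free_job hook, which is therefore the safe point for the driver to drop its references. A sketch (my_gpu_job as above, cleanup helper hypothetical):

static void my_gpu_job_free(struct drm_sched_job *sched_job)
{
	struct my_gpu_job *job = container_of(sched_job, struct my_gpu_job, base);

	/* The job has left ring_mirror_list and its TDR work is cancelled,
	 * so nothing in the scheduler still references it. */
	my_gpu_job_cleanup(job);	/* hypothetical: drop BO refs etc. */
	kfree(job);
}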
-static void amd_sched_job_timedout(struct work_struct *work)
+static void drm_sched_job_timedout(struct work_struct *work)
 {
-	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
 						 work_tdr.work);
 
 	job->sched->ops->timedout_job(job);
 }
 
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
-	struct amd_sched_job *s_job;
+	struct drm_sched_job *s_job;
+	struct drm_sched_entity *entity, *tmp;
+	int i;
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -457,31 +474,57 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 		}
 	}
 	spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_kickout(struct amd_sched_job *s_job)
-{
-	struct amd_gpu_scheduler *sched = s_job->sched;
-
-	spin_lock(&sched->job_list_lock);
-	list_del_init(&s_job->node);
-	spin_unlock(&sched->job_list_lock);
+	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+		atomic_inc(&bad->karma);
+		/* don't increase @bad's karma if it's from the KERNEL RQ,
+		 * because sometimes a GPU hang would corrupt kernel jobs (like VM updating jobs);
+		 * keep in mind that kernel jobs are always considered good.
+		 */
+		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
+			struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+			spin_lock(&rq->lock);
+			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+				if (bad->s_fence->scheduled.context == entity->fence_context) {
+					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
+						if (entity->guilty)
+							atomic_set(entity->guilty, 1);
+					break;
+				}
+			}
+			spin_unlock(&rq->lock);
+			if (&entity->list != &rq->entities)
+				break;
+		}
+	}
 }
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
 
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
-	struct amd_sched_job *s_job, *tmp;
+	struct drm_sched_job *s_job, *tmp;
+	bool found_guilty = false;
 	int r;
 
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-					 struct amd_sched_job, node);
+					 struct drm_sched_job, node);
 	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
-		struct amd_sched_fence *s_fence = s_job->s_fence;
+		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
+		uint64_t guilty_context;
+
+		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+			found_guilty = true;
+			guilty_context = s_job->s_fence->scheduled.context;
+		}
+
+		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
@@ -489,62 +532,47 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,
-						   amd_sched_process_job);
+						   drm_sched_process_job);
 			if (r == -ENOENT)
-				amd_sched_process_job(fence, &s_fence->cb);
+				drm_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n", r);
 			dma_fence_put(fence);
 		} else {
-			DRM_ERROR("Failed to run job!\n");
-			amd_sched_process_job(NULL, &s_fence->cb);
+			drm_sched_process_job(NULL, &s_fence->cb);
 		}
 		spin_lock(&sched->job_list_lock);
 	}
 	spin_unlock(&sched->job_list_lock);
 }
-
-/**
- * Submit a job to the job queue
- *
- * @sched_job	The pointer to job required to submit
- *
- * Returns 0 for success, negative error code otherwise.
- */
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
-{
-	struct amd_sched_entity *entity = sched_job->s_entity;
-
-	trace_amd_sched_job(sched_job);
-	wait_event(entity->sched->job_scheduled,
-		   amd_sched_entity_in(sched_job));
-}
+EXPORT_SYMBOL(drm_sched_job_recovery);
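The two reset entry points are designed to be used together from the backend's timedout_job hook: drm_sched_hw_job_reset() detaches the in-flight jobs from their hardware fences and applies the karma accounting shown above, the driver then resets the hardware, and drm_sched_job_recovery() resubmits the mirror list, completing jobs from a context past hang_limit with -ECANCELED. A sketch of that flow, with my_gpu_reset_hw() standing in for the driver's actual reset:

static void my_gpu_job_timedout(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	/* Stop tracking in-flight jobs and charge the hang to @sched_job;
	 * once its karma passes hang_limit, the owning entity's guilty
	 * flag is set. */
	drm_sched_hw_job_reset(sched, sched_job);

	my_gpu_reset_hw(sched);		/* hypothetical hardware reset */

	/* Re-arm the TDR timer and resubmit the surviving jobs; jobs from
	 * the guilty context complete with -ECANCELED instead. */
	drm_sched_job_recovery(sched);
}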
 
 /* init a sched_job with basic field */
-int amd_sched_job_init(struct amd_sched_job *job,
-		       struct amd_gpu_scheduler *sched,
-		       struct amd_sched_entity *entity,
+int drm_sched_job_init(struct drm_sched_job *job,
+		       struct drm_gpu_scheduler *sched,
+		       struct drm_sched_entity *entity,
 		       void *owner)
 {
 	job->sched = sched;
-	job->s_entity = entity;
-	job->s_fence = amd_sched_fence_create(entity, owner);
+	job->s_priority = entity->rq - sched->sched_rq;
+	job->s_fence = drm_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
 	job->id = atomic64_inc_return(&sched->job_id_count);
 
-	INIT_WORK(&job->finish_work, amd_sched_job_finish);
+	INIT_WORK(&job->finish_work, drm_sched_job_finish);
 	INIT_LIST_HEAD(&job->node);
-	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
 * Return true if we can push more jobs to the hw.
 */
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 {
 	return atomic_read(&sched->hw_rq_count) <
 		sched->hw_submission_limit;
@@ -553,27 +581,27 @@
 
 /**
 * Wake up the scheduler when it is ready
 */
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
-	if (amd_sched_ready(sched))
+	if (drm_sched_ready(sched))
 		wake_up_interruptible(&sched->wake_up_worker);
 }
 
 /**
 * Select next entity to process
 */
-static struct amd_sched_entity *
-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
-	struct amd_sched_entity *entity;
+	struct drm_sched_entity *entity;
 	int i;
 
-	if (!amd_sched_ready(sched))
+	if (!drm_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue */
-	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
-		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
 		if (entity)
 			break;
 	}
@@ -581,22 +609,22 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 
 	return entity;
 }
 
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct amd_sched_fence *s_fence =
-		container_of(cb, struct amd_sched_fence, cb);
-	struct amd_gpu_scheduler *sched = s_fence->sched;
+	struct drm_sched_fence *s_fence =
+		container_of(cb, struct drm_sched_fence, cb);
+	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
-	amd_sched_fence_finished(s_fence);
+	drm_sched_fence_finished(s_fence);
 
-	trace_amd_sched_process_job(s_fence);
+	trace_drm_sched_process_job(s_fence);
 	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
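On the submission side, the two-step pattern survives the rename, but both halves change shape: drm_sched_job_init() now derives s_priority from the run queue instead of storing s_entity, and drm_sched_entity_push_job() takes the entity explicitly and returns nothing. A sketch of a driver submit path under those assumptions:

static int my_gpu_submit(struct my_gpu_ctx *ctx, struct my_gpu_job *job,
			 void *owner)
{
	int r;

	r = drm_sched_job_init(&job->base, ctx->entity.sched,
			       &ctx->entity, owner);
	if (r)
		return r;

	/* Unlike the old kfifo-backed path, this can neither fail nor
	 * block: spsc_queue_push() always succeeds, and the entity is put
	 * on its run queue when its first job arrives. */
	drm_sched_entity_push_job(&job->base, &ctx->entity);
	return 0;
}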
-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 {
 	if (kthread_should_park()) {
 		kthread_parkme();
@@ -606,58 +634,54 @@ static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
 	return false;
 }
 
-static int amd_sched_main(void *param)
+static int drm_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
-	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
-	int r, count;
+	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+	int r;
 
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
-		struct amd_sched_entity *entity = NULL;
-		struct amd_sched_fence *s_fence;
-		struct amd_sched_job *sched_job;
+		struct drm_sched_entity *entity = NULL;
+		struct drm_sched_fence *s_fence;
+		struct drm_sched_job *sched_job;
 		struct dma_fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
-					 (!amd_sched_blocked(sched) &&
-					  (entity = amd_sched_select_entity(sched))) ||
+					 (!drm_sched_blocked(sched) &&
+					  (entity = drm_sched_select_entity(sched))) ||
 					 kthread_should_stop());
 
 		if (!entity)
 			continue;
 
-		sched_job = amd_sched_entity_peek_job(entity);
+		sched_job = drm_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
 		s_fence = sched_job->s_fence;
 
 		atomic_inc(&sched->hw_rq_count);
-		amd_sched_job_begin(sched_job);
+		drm_sched_job_begin(sched_job);
 
 		fence = sched->ops->run_job(sched_job);
-		amd_sched_fence_scheduled(s_fence);
+		drm_sched_fence_scheduled(s_fence);
 
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,
-						   amd_sched_process_job);
+						   drm_sched_process_job);
 			if (r == -ENOENT)
-				amd_sched_process_job(fence, &s_fence->cb);
+				drm_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n", r);
 			dma_fence_put(fence);
 		} else {
-			DRM_ERROR("Failed to run job!\n");
-			amd_sched_process_job(NULL, &s_fence->cb);
+			drm_sched_process_job(NULL, &s_fence->cb);
 		}
 
-		count = kfifo_out(&entity->job_queue, &sched_job,
-				  sizeof(sched_job));
-		WARN_ON(count != sizeof(sched_job));
 		wake_up(&sched->job_scheduled);
 	}
 	return 0;
@@ -673,17 +697,21 @@ static int amd_sched_main(void *param)
 *
 * Return 0 on success, otherwise error code.
 */
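The main loop ties the backend together: pick a ready entity by priority, pop a job, call run_job, and wire the returned hardware fence to drm_sched_process_job(). Note that a NULL return from run_job is no longer logged as an error; it is treated as an immediately completed job. A sketch of the hook and the ops table it belongs to (all my_gpu_* names hypothetical):

static struct dma_fence *my_gpu_job_run(struct drm_sched_job *sched_job)
{
	struct my_gpu_job *job = container_of(sched_job, struct my_gpu_job, base);

	/* Kick the job off on the hardware and return the fence that will
	 * signal on completion; the scheduler stores it as s_fence->parent
	 * with drm_sched_process_job() attached as its callback. */
	return my_gpu_ring_emit(job);	/* hypothetical */
}

static const struct drm_sched_backend_ops my_gpu_sched_ops = {
	.dependency	= my_gpu_job_dependency,
	.run_job	= my_gpu_job_run,
	.timedout_job	= my_gpu_job_timedout,
	.free_job	= my_gpu_job_free,
};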
-int amd_sched_init(struct amd_gpu_scheduler *sched,
-		   const struct amd_sched_backend_ops *ops,
-		   unsigned hw_submission, long timeout, const char *name)
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+		   const struct drm_sched_backend_ops *ops,
+		   unsigned hw_submission,
+		   unsigned hang_limit,
+		   long timeout,
+		   const char *name)
 {
 	int i;
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
 	sched->timeout = timeout;
-	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
-		amd_sched_rq_init(&sched->sched_rq[i]);
+	sched->hang_limit = hang_limit;
+	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+		drm_sched_rq_init(&sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
@@ -693,7 +721,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a separate kernel thread */
-	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
 		return PTR_ERR(sched->thread);
@@ -701,14 +729,16 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_sched_init);
 
 /**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
 }
+EXPORT_SYMBOL(drm_sched_fini);
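Finally, drm_sched_init() gains the hang_limit parameter between hw_submission and timeout. A sketch of bringing a scheduler up and down with the exported API; the numeric values are arbitrary examples:

static int my_gpu_sched_start(struct drm_gpu_scheduler *sched)
{
	/* Up to 64 jobs in flight, tolerate one hang before a context is
	 * marked guilty, fire timedout_job after 10 seconds of silence. */
	return drm_sched_init(sched, &my_gpu_sched_ops, 64, 1,
			      msecs_to_jiffies(10 * 1000), "my_gpu");
}

static void my_gpu_sched_stop(struct drm_gpu_scheduler *sched)
{
	drm_sched_fini(sched);
}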