Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  89
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c    35
2 files changed, 64 insertions, 60 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 461a7a8129f4..63bccd201b97 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -38,46 +38,40 @@
  * submit to HW ring.
  *
  * @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
  *           entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm sched in sched_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
- * Note: the rq_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
  *       the entity
  *
  * Returns 0 on success or a negative error code on failure.
  */
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
-			  unsigned int num_rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
+			  unsigned int num_sched_list,
 			  atomic_t *guilty)
 {
-	int i;
-
-	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = NULL;
 	entity->guilty = guilty;
-	entity->num_rq_list = num_rq_list;
-	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
-				  GFP_KERNEL);
-	if (!entity->rq_list)
-		return -ENOMEM;
-
-	init_completion(&entity->entity_idle);
-
-	for (i = 0; i < num_rq_list; ++i)
-		entity->rq_list[i] = rq_list[i];
+	entity->num_sched_list = num_sched_list;
+	entity->priority = priority;
+	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+	entity->last_scheduled = NULL;
 
-	if (num_rq_list)
-		entity->rq = rq_list[0];
+	if(num_sched_list)
+		entity->rq = &sched_list[0]->sched_rq[entity->priority];
 
-	entity->last_scheduled = NULL;
+	init_completion(&entity->entity_idle);
 
 	spin_lock_init(&entity->rq_lock);
 	spsc_queue_init(&entity->job_queue);
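
With this hunk, an entity is created from a scheduler list plus an explicit priority; the rq_list/num_rq_list pair and its kcalloc'd copy are gone. A minimal caller sketch under the new signature (the helper name and the `rings` array are illustrative, not part of this patch):

	#include <drm/gpu_scheduler.h>

	/* Hypothetical driver-side setup: hand the entity its priority and
	 * the schedulers it may run on. With num_rings == 1 the entity
	 * stays on that scheduler; with more, it can be load balanced. */
	static int example_entity_setup(struct drm_sched_entity *entity,
					struct drm_gpu_scheduler **rings,
					unsigned int num_rings)
	{
		return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					     rings, num_rings, NULL);
	}
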
@@ -136,21 +130,21 @@ static struct drm_sched_rq *
 drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 {
 	struct drm_sched_rq *rq = NULL;
-	unsigned int min_jobs = UINT_MAX, num_jobs;
+	unsigned int min_score = UINT_MAX, num_score;
 	int i;
 
-	for (i = 0; i < entity->num_rq_list; ++i) {
-		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+	for (i = 0; i < entity->num_sched_list; ++i) {
+		struct drm_gpu_scheduler *sched = entity->sched_list[i];
 
-		if (!entity->rq_list[i]->sched->ready) {
+		if (!entity->sched_list[i]->ready) {
 			DRM_WARN("sched%s is not ready, skipping", sched->name);
 			continue;
 		}
 
-		num_jobs = atomic_read(&sched->num_jobs);
-		if (num_jobs < min_jobs) {
-			min_jobs = num_jobs;
-			rq = entity->rq_list[i];
+		num_score = atomic_read(&sched->score);
+		if (num_score < min_score) {
+			min_score = num_score;
+			rq = &entity->sched_list[i]->sched_rq[entity->priority];
 		}
 	}
 
@@ -308,7 +302,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
-	kfree(entity->rq_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
@@ -354,15 +347,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 }
 
 /**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
-					     enum drm_sched_priority priority)
-{
-	*rq = &(*rq)->sched->sched_rq[priority];
-}
-
-/**
  * drm_sched_entity_set_priority - Sets priority of the entity
  *
  * @entity: scheduler entity
@@ -373,19 +357,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
 {
-	unsigned int i;
-
 	spin_lock(&entity->rq_lock);
-
-	for (i = 0; i < entity->num_rq_list; ++i)
-		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
-	if (entity->rq) {
-		drm_sched_rq_remove_entity(entity->rq, entity);
-		drm_sched_entity_set_rq_priority(&entity->rq, priority);
-		drm_sched_rq_add_entity(entity->rq, entity);
-	}
-
+	entity->priority = priority;
 	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +463,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 	struct dma_fence *fence;
 	struct drm_sched_rq *rq;
 
-	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
 		return;
 
 	fence = READ_ONCE(entity->last_scheduled);
 	if (fence && !dma_fence_is_signaled(fence))
 		return;
 
+	spin_lock(&entity->rq_lock);
 	rq = drm_sched_entity_get_free_sched(entity);
-	if (rq == entity->rq)
-		return;
+	if (rq != entity->rq) {
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+	}
 
-	spin_lock(&entity->rq_lock);
-	drm_sched_rq_remove_entity(entity->rq, entity);
-	entity->rq = rq;
 	spin_unlock(&entity->rq_lock);
 }
 
@@ -525,7 +498,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(&entity->rq->sched->num_jobs);
+	atomic_inc(&entity->rq->sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
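
Note the changed semantics of drm_sched_entity_set_priority(): it now only records the new priority under rq_lock, and the entity's run queue is remapped to sched_rq[priority] on the next drm_sched_entity_select_rq(), which only rebalances entities initialized with more than one scheduler. A hedged usage sketch (helper name illustrative):

	#include <drm/gpu_scheduler.h>

	/* Illustrative: no run-queue move happens at call time anymore.
	 * The next time the entity's job queue drains and a scheduler is
	 * picked, jobs land on the run queue of the stored priority. */
	static void example_bump_priority(struct drm_sched_entity *entity)
	{
		drm_sched_entity_set_priority(entity, DRM_SCHED_PRIORITY_KERNEL);
	}
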
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3c57e84222ca..60c4c6a1aac6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	if (!list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
+	atomic_inc(&rq->sched->score);
 	list_add_tail(&entity->list, &rq->entities);
 	spin_unlock(&rq->lock);
 }
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	if (list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
+	atomic_dec(&rq->sched->score);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
@@ -287,10 +289,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	unsigned long flags;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
 
 	if (job) {
+		/*
+		 * Remove the bad job so it cannot be freed by concurrent
+		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
+		 * is parked at which point it's safe.
+		 */
+		list_del_init(&job->node);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 		job->sched->ops->timedout_job(job);
 
 		/*
@@ -301,6 +314,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 			job->sched->ops->free_job(job);
 			sched->free_guilty = false;
 		}
+	} else {
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
 	}
 
 	spin_lock_irqsave(&sched->job_list_lock, flags);
@@ -373,6 +388,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	kthread_park(sched->thread);
 
 	/*
+	 * Reinsert back the bad job here - now it's safe as
+	 * drm_sched_get_cleanup_job cannot race against us and release the
+	 * bad job at this point - we parked (waited for) any in progress
+	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+	 * now until the scheduler thread is unparked.
+	 */
+	if (bad && bad->sched == sched)
+		/*
+		 * Add at the head of the queue to reflect it was the earliest
+		 * job extracted.
+		 */
+		list_add(&bad->node, &sched->ring_mirror_list);
+
+	/*
 	 * Iterate the job list from later to earlier one and either deactive
 	 * their HW callbacks or remove them from mirror list if they already
 	 * signaled.
@@ -628,11 +657,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->num_jobs);
+	atomic_dec(&sched->score);
 
 	trace_drm_sched_process_job(s_fence);
 
+	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence);
+	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -803,7 +834,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-	atomic_set(&sched->num_jobs, 0);
+	atomic_set(&sched->score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
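
Taken together, the sched_main.c hunks close a use-after-free race in timeout handling: the bad job is unlinked from ring_mirror_list under job_list_lock before the driver's timeout handler runs, and drm_sched_stop() reinserts it at the list head only once sched->thread is parked. A condensed sketch of that ordering (an outline of this patch's logic, not a new API; the function name is illustrative):

	#include <drm/gpu_scheduler.h>

	static void timedout_ordering_sketch(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *job;
		unsigned long flags;

		/* Take the job off the mirror list so a concurrent cleanup
		 * cannot free it while the timeout handler inspects it. */
		spin_lock_irqsave(&sched->job_list_lock, flags);
		job = list_first_entry_or_null(&sched->ring_mirror_list,
					       struct drm_sched_job, node);
		if (job)
			list_del_init(&job->node);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);

		if (job)
			job->sched->ops->timedout_job(job);
		/* The handler is expected to reach drm_sched_stop(), which
		 * parks sched->thread and then re-adds the bad job at the
		 * head of ring_mirror_list, where it is safe again. */
	}
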