Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 120
1 file changed, 64 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e..997aa15dd8d9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct drm_gpu_scheduler *sched = s_fence->sched;
+
+ atomic_dec(&sched->hw_rq_count);
+ atomic_dec(&sched->score);
+
+ trace_drm_sched_process_job(s_fence);
+
+ dma_fence_get(&s_fence->finished);
+ drm_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+ drm_sched_job_done(s_job);
+}
+
+/**
* drm_sched_dependency_optimized
*
* @fence: the dependency fence
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !list_empty(&sched->ring_mirror_list))
+ !list_empty(&sched->pending_list))
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
{
spin_lock(&sched->job_list_lock);
- if (list_empty(&sched->ring_mirror_list))
+ if (list_empty(&sched->pending_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
struct drm_gpu_scheduler *sched = s_job->sched;
spin_lock(&sched->job_list_lock);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ list_add_tail(&s_job->list, &sched->pending_list);
drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job) {
/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
* is parked at which point it's safe.
*/
- list_del_init(&job->node);
+ list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* Stop the scheduler and also removes and frees all completed jobs.
* Note: bad job will not be freed as it might be used later and so it's
* callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Add at the head of the queue to reflect it was the earliest
* job extracted.
*/
- list_add(&bad->node, &sched->ring_mirror_list);
+ list_add(&bad->list, &sched->pending_list);
/*
* Iterate the job list from later to earlier one and either deactive
- * their HW callbacks or remove them from mirror list if they already
+ * their HW callbacks or remove them from pending list if they already
* signaled.
* This iteration is thread safe as sched thread is stopped.
*/
- list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+ list) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
atomic_dec(&sched->hw_rq_count);
} else {
/*
- * remove job from ring_mirror_list.
+ * remove job from pending_list.
* Locking here is for concurrent resume timeout
*/
spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
spin_unlock(&sched->job_list_lock);
/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
* so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &s_job->cb);
+ drm_sched_job_done(s_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
} else
- drm_sched_process_job(NULL, &s_job->cb);
+ drm_sched_job_done(s_job);
}
if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
bool found_guilty = false;
struct dma_fence *fence;
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_LIST_HEAD(&job->node);
+ INIT_LIST_HEAD(&job->list);
return 0;
}
@@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
}
/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
- struct drm_sched_fence *s_fence = s_job->s_fence;
- struct drm_gpu_scheduler *sched = s_fence->sched;
-
- atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
-
- trace_drm_sched_process_job(s_fence);
-
- dma_fence_get(&s_fence->finished);
- drm_sched_fence_finished(s_fence);
- dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
-}
-
-/**
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
/*
* Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch ring_mirror_list
+ * is being parked and hence assumed to not touch pending_list
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
- /* remove job from ring_mirror_list */
- list_del_init(&job->node);
+ /* remove job from pending_list */
+ list_del_init(&job->list);
} else {
job = NULL;
/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &sched_job->cb);
+ drm_sched_job_done(sched_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
if (IS_ERR(fence))
dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- drm_sched_process_job(NULL, &sched_job->cb);
+ drm_sched_job_done(sched_job);
}
wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
- INIT_LIST_HEAD(&sched->ring_mirror_list);
+ INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
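
For quick reference, the snippet below condenses the completion wiring that both converted call sites (drm_sched_start() and drm_sched_main()) share after this patch. It is an illustrative sketch only: the wrapper example_arm_completion() is hypothetical and does not exist in the kernel, while drm_sched_job_done(), drm_sched_job_done_cb(), s_job->cb and the -ENOENT fallback are taken directly from the hunks above.

static void example_arm_completion(struct drm_sched_job *s_job,
				   struct dma_fence *hw_fence)
{
	int r;

	if (!hw_fence) {
		/* run_job() produced no HW fence (or the error was already
		 * recorded on the finished fence): complete the job now. */
		drm_sched_job_done(s_job);
		return;
	}

	/* Arm the per-job callback; drm_sched_job_done_cb() recovers the
	 * job with container_of() and calls drm_sched_job_done(). */
	r = dma_fence_add_callback(hw_fence, &s_job->cb,
				   drm_sched_job_done_cb);
	if (r == -ENOENT)
		/* The fence signaled before the callback could be added. */
		drm_sched_job_done(s_job);
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
}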