Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
 drivers/gpu/drm/scheduler/sched_main.c | 91 +++++++++++++++++++++++++++------
 1 file changed, 77 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9d4cd196037a..dbb69063b3d5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -211,6 +211,62 @@ void drm_sched_fault(struct drm_gpu_scheduler *sched)
}
EXPORT_SYMBOL(drm_sched_fault);
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+	unsigned long sched_timeout, now = jiffies;
+
+	sched_timeout = sched->work_tdr.timer.expires;
+
+	/*
+	 * Modify the timeout to an arbitrarily large value. This also prevents
+	 * the timeout from being restarted when new submissions arrive.
+	 */
+	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) &&
+	    time_after(sched_timeout, now))
+		return sched_timeout - now;
+	else
+		return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+			      unsigned long remaining)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+
+	if (list_empty(&sched->ring_mirror_list))
+		cancel_delayed_work(&sched->work_tdr);
+	else
+		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
+
/* job_finish is called after hw fence signaled
 */
static void drm_sched_job_finish(struct work_struct *work)
@@ -218,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;
+	unsigned long flags;
	/*
	 * Canceling the timeout without removing our job from the ring mirror
@@ -228,12 +285,12 @@ static void drm_sched_job_finish(struct work_struct *work)
	 */
	cancel_delayed_work_sync(&sched->work_tdr);
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
	/* remove job from ring_mirror_list */
	list_del_init(&s_job->node);
	/* queue TDR for next job */
	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
	sched->ops->free_job(s_job);
}
@@ -249,20 +306,22 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;
+	unsigned long flags;
	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
+	unsigned long flags;
	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
@@ -271,9 +330,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
	if (job)
		job->sched->ops->timedout_job(job);
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
@@ -287,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
+	unsigned long flags;
	int i;
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
@@ -299,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
			atomic_dec(&sched->hw_rq_count);
		}
	}
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
@@ -337,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
+	unsigned long flags;
	int r;
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
@@ -353,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);
-		spin_unlock(&sched->job_list_lock);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
@@ -372,10 +433,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
				drm_sched_expel_job_unlocked(s_job);
			drm_sched_process_job(NULL, &s_fence->cb);
		}
-		spin_lock(&sched->job_list_lock);
+		spin_lock_irqsave(&sched->job_list_lock, flags);
	}
	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -612,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
		   long timeout,
		   const char *name)
{
-	int i;
+	int i, ret;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
@@ -633,8 +694,10 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
+		ret = PTR_ERR(sched->thread);
+		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
-		return PTR_ERR(sched->thread);
+		return ret;
	}
	sched->ready = true;
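
The two helpers introduced by this patch are intended to be used as a pair: drm_sched_suspend_timeout() parks the TDR and reports the time that was left on it, and drm_sched_resume_timeout() re-arms the work with that value. A minimal driver-side sketch under that assumption follows; the function name and the quiesce step are hypothetical, only the two helpers come from this patch.

/*
 * Hypothetical usage sketch, not part of this patch: park the job timeout
 * while the hardware is quiesced, then restore the remaining time.
 */
static void example_quiesce_ring(struct drm_gpu_scheduler *sched)
{
	unsigned long remaining;

	/* Pushes work_tdr out to MAX_SCHEDULE_TIMEOUT; safe in IRQ context. */
	remaining = drm_sched_suspend_timeout(sched);

	/* ... driver-specific work that must not count against the TDR ... */

	/* Re-arms with the remaining time, or cancels if no jobs are queued. */
	drm_sched_resume_timeout(sched, remaining);
}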