Diffstat (limited to 'include/drm/gpu_scheduler.h')
 -rw-r--r--  include/drm/gpu_scheduler.h  | 48
 1 file changed, 33 insertions(+), 15 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 21c648b0b2a1..d87b268f1781 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@ enum drm_sched_priority {
*
* @list: used to append this struct to the list of entities in the
* runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can
+ * be scheduled
+ * @num_rq_list: number of run queues in the rq_list
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -67,6 +70,7 @@ enum drm_sched_priority {
* @fini_status: contains the exit status in case the process was signalled.
* @last_scheduled: points to the finished fence of the last scheduled job.
* @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from rq and destined for termination.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
@@ -75,6 +79,8 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
+ struct drm_sched_rq **rq_list;
+ unsigned int num_rq_list;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -87,6 +93,7 @@ struct drm_sched_entity {
atomic_t *guilty;
struct dma_fence *last_scheduled;
struct task_struct *last_user;
+ bool stopped;
};
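
For context, a minimal driver-side sketch (not part of this patch) of how the new @rq_list/@num_rq_list fields get populated: drm_sched_entity_init() now takes an array of candidate run queues, typically one per hardware ring at the desired priority. The ring_a/ring_b objects, the NORMAL priority and the trailing count/guilty arguments are assumptions for illustration only.

	struct drm_sched_rq *rqs[2];
	struct drm_sched_entity entity;
	int r;

	/* One candidate run queue per hardware ring, same priority level. */
	rqs[0] = &ring_a->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	rqs[1] = &ring_b->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* The entity may now be scheduled on either of these run queues. */
	r = drm_sched_entity_init(&entity, rqs, ARRAY_SIZE(rqs), NULL);
	if (r)
		return r;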
/**
@@ -168,8 +175,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* finished to remove the job from the
* @drm_gpu_scheduler.ring_mirror_list.
* @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
- * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
- * interval is over.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -188,7 +193,6 @@ struct drm_sched_job {
struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
- struct delayed_work work_tdr;
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
@@ -252,11 +256,14 @@ struct drm_sched_backend_ops {
* finished.
* @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign a unique id to each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ * timeout interval is over.
* @thread: the kthread on which the scheduler runs.
* @ring_mirror_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the number of hangs caused by a job crosses this limit, the
* job is marked guilty and will not be considered for scheduling further.
+ * @num_jobs: the number of jobs currently queued in the scheduler
*
* One scheduler is implemented for each hardware ring.
*/
@@ -270,10 +277,12 @@ struct drm_gpu_scheduler {
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct delayed_work work_tdr;
struct task_struct *thread;
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
+ atomic_t num_jobs;
};
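
With @work_tdr now embedded in the scheduler rather than in each job, a single timeout timer guards the whole ring; drivers keep reporting timeouts through the existing &drm_sched_backend_ops.timedout_job callback. A minimal setup sketch, assuming a hypothetical ring object embedding the scheduler, hypothetical my_* callbacks, a queue depth of 64, a hang limit of 10 and a 500 ms timeout:

	/* Assumed driver callbacks, prototypes only for illustration. */
	static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
						   struct drm_sched_entity *s_entity);
	static struct dma_fence *my_job_run(struct drm_sched_job *sched_job);
	static void my_job_timedout(struct drm_sched_job *sched_job);
	static void my_job_free(struct drm_sched_job *sched_job);

	static const struct drm_sched_backend_ops my_ops = {
		.dependency   = my_job_dependency,
		.run_job      = my_job_run,
		.timedout_job = my_job_timedout,	/* invoked via the per-scheduler work_tdr */
		.free_job     = my_job_free,
	};

	r = drm_sched_init(&ring->sched, &my_ops, 64, 10,
			   msecs_to_jiffies(500), "my-ring");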
int drm_sched_init(struct drm_gpu_scheduler *sched,
@@ -281,6 +290,21 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ void *owner);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
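
A minimal sketch of the submission path these declarations serve (driver-side, assumed code; the my_job wrapper embedding a &drm_sched_job is illustrative):

	struct my_job {
		struct drm_sched_job base;
		/* driver-specific payload */
	};

	static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
			     void *owner)
	{
		int r;

		/* Attach the job to the entity and set up its fences. */
		r = drm_sched_job_init(&job->base, entity, owner);
		if (r)
			return r;

		/* Queue it; the scheduler picks it up via the entity's run queue. */
		drm_sched_entity_push_job(&job->base, entity);
		return 0;
	}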
@@ -289,23 +313,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
-void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq);
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+ enum drm_sched_priority priority);
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_create(
struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
-int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_sched_entity *entity,
- void *owner);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
- struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
#endif
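
With the entity now carrying its own @rq_list, callers no longer pin it to a single run queue via the removed drm_sched_entity_set_rq(); they only express the new priority and the scheduler selects the matching run queue when the next job is popped. A one-line sketch, with the priority value and ctx object chosen purely for illustration:

	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH_HW);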