Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h  563
1 file changed, 444 insertions(+), 119 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 92436553fd6a..1a7e377d4cbb 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,99 +27,235 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
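As a hedged illustration (not part of this patch), the flag is set on an already armed scheduler fence. The helper name below and the assumption that the caller holds the &drm_sched_fence.finished fence of a prior submission are this example's own:

static void example_disable_pipelining(struct dma_fence *finished_fence)
{
        /*
         * finished_fence is assumed to be the &drm_sched_fence.finished fence
         * of a previously submitted job. Setting the flag forces a full CPU
         * round trip before jobs depending on it reach the hw queue.
         */
        set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &finished_fence->flags);
}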
+/**
+ * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
+ *
+ * Because a deadline hint can be set before the backing hw fence is
+ * created, we need to keep track of whether a deadline has already been
+ * set.
+ */
+#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
+
+enum dma_resv_usage;
+struct dma_resv;
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
+struct drm_file;
+
/* These are often used as an (initial) index
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
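To illustrate the indexing convention noted above (DRM_SCHED_PRIORITY_KERNEL is 0 and therefore the highest priority), here is a minimal sketch. The helper is hypothetical and assumes the &drm_gpu_scheduler layout introduced later in this diff (@num_rqs and the @sched_rq array), with at least one run-queue allocated:

static struct drm_sched_rq *
example_rq_for_priority(struct drm_gpu_scheduler *sched,
                        enum drm_sched_priority prio)
{
        /* Clamp to the lowest priority the scheduler actually provides. */
        if (prio >= sched->num_rqs)
                prio = sched->num_rqs - 1;

        return sched->sched_rq[prio];
}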
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @priority: priority of the entity
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @lock:
+ *
+ * Lock protecting the run-queue (@rq) to which this entity belongs,
+ * @priority and the list of schedulers (@sched_list, @num_sched_list).
+ */
+ spinlock_t lock;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @lock, but readers are generally lockless and seem to just race with
+ * not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if &num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by @lock.
+ */
enum drm_sched_priority priority;
- spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+ * Points to the driver-supplied guilty flag for this entity. Set via
+ * drm_sched_increase_karma() once a hanging job from this entity
+ * exceeds the scheduler's hang limit.
+ */
atomic_t *guilty;
- struct dma_fence *last_scheduled;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by the scheduler thread, can be accessed locklessly from
+ * drm_sched_job_arm() if the queue is empty.
+ */
+ struct dma_fence __rcu *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+ * Marks the entity as removed from rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
+
+ /**
+ * @oldest_job_waiting:
+ *
+ * Marks earliest job waiting in SW queue
+ */
+ ktime_t oldest_job_waiting;
+
+ /**
+ * @rb_tree_node:
+ *
+ * The node used to insert this entity into time based priority queue
+ */
+ struct rb_node rb_tree_node;
+
};
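Because the kernel-doc above describes the entity field by field, a short hedged sketch of creating one may help. The helper name is hypothetical, and the single-scheduler, normal-priority choice is only an example:

static int example_entity_setup(struct drm_sched_entity *entity,
                                struct drm_gpu_scheduler *sched)
{
        struct drm_gpu_scheduler *sched_list[] = { sched };

        /*
         * With num_sched_list == 1 the entity does not keep the sched_list
         * pointer (see @sched_list above), so a stack array is fine here.
         * With more than one scheduler the array must outlive the entity.
         */
        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, ARRAY_SIZE(sched_list),
                                     NULL /* no shared guilty flag */);
}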
/**
* struct drm_sched_rq - queue of entities to be scheduled.
*
- * @lock: to modify the entities list.
* @sched: the scheduler to which this rq belongs to.
- * @entities: list of the entities to be scheduled.
+ * @lock: protects @entities, @rb_tree_root and @current_entity.
* @current_entity: the entity which is to be scheduled.
+ * @entities: list of the entities to be scheduled.
+ * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
*/
struct drm_sched_rq {
- spinlock_t lock;
struct drm_gpu_scheduler *sched;
- struct list_head entities;
+
+ spinlock_t lock;
+ /* Following members are protected by the @lock: */
struct drm_sched_entity *current_entity;
+ struct list_head entities;
+ struct rb_root_cached rb_tree_root;
};
/**
@@ -144,6 +280,12 @@ struct drm_sched_fence {
*/
struct dma_fence finished;
+ /**
+ * @deadline: deadline set on &drm_sched_fence.finished which
+ * potentially needs to be propagated to &drm_sched_fence.parent
+ */
+ ktime_t deadline;
+
/**
* @parent: the fence returned by &drm_sched_backend_ops.run_job
* when scheduling the job on hardware. We signal the
@@ -171,10 +313,12 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* struct drm_sched_job - A job to be run by an entity.
*
* @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @list: a job participates in the "pending" and "done" lists.
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @credits: the number of credits this job contributes to the scheduler
+ * @work: Helper to reschedule job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -188,52 +332,176 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
+ u64 id;
+
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
+
+ /**
+ * @sched:
+ *
+ * The scheduler this job is or will be scheduled on. Gets set by
+ * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
+ * has finished.
+ */
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct list_head node;
- uint64_t id;
- atomic_t karma;
+ struct drm_sched_entity *entity;
+
enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
+ u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
+
+ /*
+ * work is used only after finish_cb has been used and will not be
+ * accessed anymore.
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
+ };
+
struct dma_fence_cb cb;
+
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
};
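A hedged sketch of the usual lifecycle of this struct, using only functions declared later in this header. The wrapper struct, helper name, credit count of 1 and NULL owner are placeholders for this example:

struct example_job {
        struct drm_sched_job base;
        /* driver-specific payload would go here */
};

static int example_submit(struct example_job *job,
                          struct drm_sched_entity *entity,
                          struct dma_fence *in_fence)
{
        int ret;

        ret = drm_sched_job_init(&job->base, entity, 1, NULL);
        if (ret)
                return ret;

        /* Optionally wait for an input fence; the call consumes a reference. */
        if (in_fence) {
                ret = drm_sched_job_add_dependency(&job->base,
                                                   dma_fence_get(in_fence));
                if (ret)
                        goto err_cleanup;
        }

        drm_sched_job_arm(&job->base);
        drm_sched_entity_push_job(&job->base);
        return 0;

err_cleanup:
        drm_sched_job_cleanup(&job->base);
        return ret;
}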
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
-}
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ */
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE,
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented on the driver side.
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @prepare_job:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+ * Can be NULL if no additional preparation of the dependencies is
+ * necessary. Skipped when jobs are killed instead of run.
*/
- struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity);
+ struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Using this is discouraged because it violates
+ * dma_fence rules, notably dma_fence_init() has to be called on
+ * already initialized fences for a second time. Moreover, this is
+ * dangerous because attempts to allocate memory might deadlock with
+ * memory management code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * this fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+ * corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
+ *
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that all entities
+ * associated with the affected scheduler cannot be torn down, because
+ * this would effectively also affect innocent userspace processes which
+ * did not submit faulty jobs.
+ *
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+ * 4. Re-submit jobs on all schedulers impacted by the reset to the
+ * entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
+ *
+ * Note that some GPUs have distinct hardware queues but need to reset
+ * the GPU globally, which requires extra synchronization between the
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
+ *
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
+ *
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
@@ -243,81 +511,151 @@ struct drm_sched_backend_ops {
};
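To make the contracts above concrete, here is a hedged driver-side sketch for the hardware-scheduler case. All example_* identifiers and the two hardware helpers are hypothetical, and a real recovery path is considerably more involved than steps 1-5 condensed below:

static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
        /*
         * Start the job on the hardware and return the hardware fence. The
         * reference returned here is the one the scheduler "inherits"; any
         * reference the driver keeps must be taken separately.
         */
        return example_hw_submit(sched_job);            /* hypothetical */
}

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;

        drm_sched_stop(sched, sched_job);               /* step 1 */
        example_kill_entity_and_reset_ring(sched_job);  /* steps 2-4, hypothetical */
        drm_sched_start(sched, 0);                      /* step 5 */

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void example_free_job(struct drm_sched_job *sched_job)
{
        drm_sched_job_cleanup(sched_job);
        /* Free the driver's embedding job structure here. */
}

static const struct drm_sched_backend_ops example_ops = {
        .run_job        = example_run_job,
        .timedout_job   = example_timedout_job,
        .free_job       = example_free_job,
};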
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
- * @sched_rq: priority wise array of run queues.
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- * is ready to be scheduled.
+ * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but could be less.
+ * @sched_rq: An allocated array of run-queues of size @num_rqs;
* @job_scheduled: once drm_sched_entity_flush() is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
+ * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
* @score: score to help the load balancer pick an idle sched
+ * @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
+ u32 credit_limit;
+ atomic_t credit_count;
long timeout;
const char *name;
- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
- wait_queue_head_t wake_up_worker;
+ u32 num_rqs;
+ struct drm_sched_rq **sched_rq;
wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ struct work_struct work_run_job;
+ struct work_struct work_free_job;
struct delayed_work work_tdr;
- struct task_struct *thread;
- struct list_head ring_mirror_list;
+ struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
+ bool pause_submit;
+ bool own_submit_wq;
+ struct device *dev;
};
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ * This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+ const struct drm_sched_backend_ops *ops;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ u32 num_rqs;
+ u32 credit_limit;
+ unsigned int hang_limit;
+ long timeout;
+ atomic_t *score;
+ const char *name;
+ struct device *dev;
+};
+
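A brief, hedged example of filling this struct and handing it to drm_sched_init(). The ops table refers to the sketch above, and the name, credit limit and timeout are illustrative values only:

static int example_sched_setup(struct drm_gpu_scheduler *sched,
                               struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops            = &example_ops,
                .submit_wq      = NULL,         /* scheduler allocates an ordered wq */
                .timeout_wq     = NULL,         /* fall back to system_wq */
                .num_rqs        = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit   = 64,
                .hang_limit     = 0,
                .timeout        = msecs_to_jiffies(500),
                .score          = NULL,
                .name           = "example-ring",
                .dev            = dev,
        };

        return drm_sched_init(sched, &args);
}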
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_sched_entity *entity,
- void *owner);
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
-void drm_sched_increase_karma(struct drm_sched_job *bad);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ u32 credits, void *owner);
+void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point);
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
+
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
+
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -327,24 +665,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_create(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+int drm_sched_entity_error(struct drm_sched_entity *entity);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif