author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2018-11-29 16:12:27 +0100
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2018-11-29 16:12:50 +0100
commit     65ffc51aba406636a901b02067287d8535c02417 (patch)
tree       206de4631c3f7d61ea552e50bde2841c558c7812 /include/drm/gpu_scheduler.h
parent     drm/virtio: virtio_gpu_cmd_resource_create_3d: drop unused fence arg (diff)
parent     Merge v4.20-rc4 into drm-next (diff)
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Requested by Boris Brezillon for some vc4 fixes that are needed for future vc4 work.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h  5
1 file changed, 5 insertions(+), 0 deletions(-)
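
Besides the vc4 conflict resolution, this merge brings the recent GPU scheduler API additions from drm-next into drm-misc-next: the new ready flag on struct drm_gpu_scheduler plus the drm_sched_job_cleanup() and drm_sched_fault() declarations visible in the diff below. The following is a minimal, hypothetical driver sketch of where such hooks might be called; the foo_* names and the exact call sites are illustrative assumptions, not taken from this commit or any in-tree driver.

/*
 * Hypothetical sketch, not part of this commit: where a driver might call
 * the new scheduler hooks.  All foo_* names are invented for illustration.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

struct foo_job {
	struct drm_sched_job base;
	/* driver-private submission state would live here */
};

/* Would be wired up as drm_sched_backend_ops.free_job */
static void foo_free_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);

	drm_sched_job_cleanup(sched_job);	/* drop the scheduler's per-job state */
	kfree(job);
}

/* Called from the driver's fault/hang handling path */
static void foo_handle_hw_fault(struct drm_gpu_scheduler *sched)
{
	if (!sched->ready)	/* HW never came up; nothing to recover */
		return;

	drm_sched_fault(sched);	/* ask the scheduler to start its recovery path */
}
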
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268f1781..926379d53484 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks if the underlying HW is ready to work
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
spinlock_t job_list_lock;
int hang_limit;
atomic_t num_jobs;
+ bool ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
+
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,