Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h  34
1 file changed, 28 insertions, 6 deletions
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f011e4c407f2..2ae4fd62e01c 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -28,9 +28,19 @@
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words, we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
struct drm_gem_object;
struct drm_gpu_scheduler;
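For illustration, a hedged sketch of how a driver might set the new flag; the helper name example_mark_no_pipeline() is invented here and not part of this patch, and it assumes the fence passed in is the scheduler fence that dependent jobs wait on (e.g. a job's finished fence).

#include <drm/gpu_scheduler.h>
#include <linux/bitops.h>
#include <linux/dma-fence.h>

/* Mark a scheduler fence so that jobs depending on it are not pipelined
 * but always take the full CPU round trip before reaching the hw queue. */
static void example_mark_no_pipeline(struct dma_fence *sched_fence)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &sched_fence->flags);
}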
@@ -269,6 +279,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
+ * @work: Helper to reschedule the job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -286,7 +297,16 @@ struct drm_sched_job {
struct list_head list;
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
+
+ /*
+ * work is used only after finish_cb has been used, at which point
+ * finish_cb will not be accessed anymore.
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
+ };
+
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
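Purely as an illustrative sketch of the reuse pattern described by the comment above: once the finish_cb callback has fired, its storage can be reinitialized as the work member and the actual job kill pushed to process context. The example_* names are invented for this sketch and are not part of the patch.

#include <drm/gpu_scheduler.h>
#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <linux/workqueue.h>

/* Runs from the system workqueue, i.e. in process context where it is
 * safe to take locks and sleep while tearing the job down. */
static void example_job_kill_work(struct work_struct *w)
{
	struct drm_sched_job *job = container_of(w, struct drm_sched_job, work);

	/* Hand the job back to the driver for cleanup. */
	job->sched->ops->free_job(job);
}

/* dma_fence callback: at this point finish_cb will never be touched again,
 * so its memory can be reused as the work member of the union. */
static void example_finish_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	INIT_WORK(&job->work, example_job_kill_work);
	schedule_work(&job->work);
}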
@@ -318,10 +338,10 @@ enum drm_gpu_sched_stat {
};
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented on the driver side.
*/
struct drm_sched_backend_ops {
/**
@@ -398,7 +418,7 @@ struct drm_sched_backend_ops {
};
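Below is a hedged, skeletal example of what "implemented on the driver side" can look like. The example_* names and the no-op bodies are placeholders; a real driver fills in hardware-specific logic and usually implements more callbacks than the three shown here.

#include <drm/gpu_scheduler.h>
#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

struct example_job {
	struct drm_sched_job base;
	/* driver-private state would follow here */
};

/* Push the job to the hardware ring and return the fence that signals
 * when the hardware has finished it. */
static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	return NULL; /* a real driver returns its hardware fence */
}

/* Invoked from the timeout workqueue when the job ran for too long. */
static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *sched_job)
{
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

/* Called once the scheduler is completely done with the job. */
static void example_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	kfree(container_of(sched_job, struct example_job, base));
}

static const struct drm_sched_backend_ops example_sched_ops = {
	.run_job	= example_run_job,
	.timedout_job	= example_timedout_job,
	.free_job	= example_free_job,
};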
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
* @hw_submission_limit: the max size of the hardware queue.
@@ -424,6 +444,7 @@ struct drm_sched_backend_ops {
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
@@ -447,13 +468,14 @@ struct drm_gpu_scheduler {
atomic_t _score;
bool ready;
bool free_guilty;
+ struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name);
+ atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
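A minimal sketch of a call site updated for the new dev parameter, reusing example_sched_ops from the ops sketch above; the example_ring_sched_init() name and the numeric limits are made-up placeholders, not values from this patch.

#include <drm/gpu_scheduler.h>
#include <linux/device.h>
#include <linux/jiffies.h>

static int example_ring_sched_init(struct drm_gpu_scheduler *sched,
				   struct device *dev)
{
	/* One scheduler per hardware ring; dev lets the scheduler core tie
	 * its diagnostics to this device. */
	return drm_sched_init(sched, &example_sched_ops,
			      64,			/* hw_submission limit */
			      3,			/* hang_limit */
			      msecs_to_jiffies(10000),	/* timeout */
			      NULL,			/* timeout_wq: system wq */
			      NULL,			/* score: use internal one */
			      "example-ring", dev);
}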
int drm_sched_job_init(struct drm_sched_job *job,