 Documentation/gpu/drm-kms.rst     |  15 ++
 drivers/gpu/drm/Makefile          |   2 +-
 drivers/gpu/drm/drm_internal.h    |  27 ++++
 drivers/gpu/drm/drm_vblank.c      |  44 +++++----
 drivers/gpu/drm/drm_vblank_work.c | 267 ++++++++++++++++++++++++++
 include/drm/drm_vblank.h          |  20 +++
 include/drm/drm_vblank_work.h     |  71 +++++++
 7 files changed, 430 insertions(+), 16 deletions(-)
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 975cfeb8a353..3c5ae4f6dfd2 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -543,3 +543,18 @@ Vertical Blanking and Interrupt Handling Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_vblank.c
:export:
+
+Vertical Blank Work
+===================
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_work.c
+ :doc: vblank works
+
+Vertical Blank Work Functions Reference
+---------------------------------------
+
+.. kernel-doc:: include/drm/drm_vblank_work.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_work.c
+ :export:
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c0e5a7e5953..02ee5faf1a92 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_cache.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
- drm_managed.o
+ drm_managed.o drm_vblank_work.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 2470a352730b..8e01caaf95cc 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,7 +21,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/kthread.h>
+
#include <drm/drm_ioctl.h>
+#include <drm/drm_vblank.h>
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
@@ -38,6 +41,7 @@ struct drm_master;
struct drm_minor;
struct drm_prime_file_private;
struct drm_printer;
+struct drm_vblank_crtc;
/* drm_file.c */
extern struct mutex drm_global_mutex;
@@ -93,7 +97,30 @@ void drm_minor_release(struct drm_minor *minor);
void drm_managed_release(struct drm_device *dev);
/* drm_vblank.c */
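+/*
+ * Compare two vblank sequence numbers, accounting for wrap-around: @seq is
+ * considered to have reached @ref when the unsigned difference
+ * (@seq - @ref) is at most 2^23.
+ */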
+static inline bool drm_vblank_passed(u64 seq, u64 ref)
+{
+ return (seq - ref) <= (1 << 23);
+}
+
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
+u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+
+/* drm_vblank_work.c */
+static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
+{
+ kthread_flush_worker(vblank->worker);
+}
+
+static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
+{
+ kthread_destroy_worker(vblank->worker);
+}
+
+int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
+void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank);
+void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index e277e40e5b82..f402c75b9d34 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -25,6 +25,7 @@
*/
#include <linux/export.h>
+#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <drm/drm_crtc.h>
@@ -363,7 +364,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
-static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u64 count;
@@ -497,6 +498,7 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
drm_core_check_feature(dev, DRIVER_MODESET));
+ drm_vblank_destroy_worker(vblank);
del_timer_sync(&vblank->disable_timer);
}
@@ -539,6 +541,10 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
vblank);
if (ret)
return ret;
+
+ ret = drm_vblank_worker_init(vblank);
+ if (ret)
+ return ret;
}
return 0;
@@ -1135,7 +1141,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
return ret;
}
-static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1178,7 +1184,7 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
-static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1281,13 +1287,16 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
-
ktime_t now;
u64 seq;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
+ /*
+ * Grab event_lock early to prevent vblank work from being scheduled
+ * while we're in the middle of shutting down vblank interrupts
+ */
spin_lock_irq(&dev->event_lock);
spin_lock(&dev->vbl_lock);
@@ -1324,11 +1333,18 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
}
+
+ /* Cancel any leftover pending vblank work */
+ drm_vblank_cancel_pending_works(vblank);
+
spin_unlock_irq(&dev->event_lock);
/* Will be reset by the modeset helpers when re-enabling the crtc by
* calling drm_calc_timestamping_constants(). */
vblank->hwmode.crtc_clock = 0;
+
+ /* Wait for any vblank work that's still executing to finish */
+ drm_vblank_flush_worker(vblank);
}
EXPORT_SYMBOL(drm_crtc_vblank_off);
@@ -1363,6 +1379,7 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
drm_WARN_ON(dev, !list_empty(&dev->vblank_event_list));
+ drm_WARN_ON(dev, !list_empty(&vblank->pending_work));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);
@@ -1589,11 +1606,6 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static inline bool vblank_passed(u64 seq, u64 ref)
-{
- return (seq - ref) <= (1 << 23);
-}
-
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
u64 req_seq,
union drm_wait_vblank *vblwait,
@@ -1651,7 +1663,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
trace_drm_vblank_event_queued(file_priv, pipe, req_seq);
e->sequence = req_seq;
- if (vblank_passed(seq, req_seq)) {
+ if (drm_vblank_passed(seq, req_seq)) {
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
vblwait->reply.sequence = seq;
@@ -1806,7 +1818,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- vblank_passed(seq, req_seq)) {
+ drm_vblank_passed(seq, req_seq)) {
req_seq = seq + 1;
vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS;
vblwait->request.sequence = req_seq;
@@ -1825,7 +1837,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
drm_dbg_core(dev, "waiting on vblank count %llu, crtc %u\n",
req_seq, pipe);
wait = wait_event_interruptible_timeout(vblank->queue,
- vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
+ drm_vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
!READ_ONCE(vblank->enabled),
msecs_to_jiffies(3000));
@@ -1874,7 +1886,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
- if (!vblank_passed(seq, e->sequence))
+ if (!drm_vblank_passed(seq, e->sequence))
continue;
drm_dbg_core(dev, "vblank event on %llu, current %llu\n",
@@ -1944,6 +1956,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
!atomic_read(&vblank->refcount));
drm_handle_vblank_events(dev, pipe);
+ drm_handle_vblank_works(vblank);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -2097,7 +2110,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
if (flags & DRM_CRTC_SEQUENCE_RELATIVE)
req_seq += seq;
- if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && vblank_passed(seq, req_seq))
+ if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && drm_vblank_passed(seq, req_seq))
req_seq = seq + 1;
e->pipe = pipe;
@@ -2126,7 +2139,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
e->sequence = req_seq;
- if (vblank_passed(seq, req_seq)) {
+ if (drm_vblank_passed(seq, req_seq)) {
drm_crtc_vblank_put(crtc);
send_vblank_event(dev, e, seq, now);
queue_seq->sequence = seq;
@@ -2146,3 +2159,4 @@ err_free:
kfree(e);
return ret;
}
+
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
new file mode 100644
index 000000000000..7ac0fc0a9415
--- /dev/null
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: MIT
+
+#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_work.h>
+#include <drm/drm_crtc.h>
+
+#include "drm_internal.h"
+
+/**
+ * DOC: vblank works
+ *
+ * Many DRM drivers need to program hardware in a time-sensitive manner,
+ * often with a deadline of starting and finishing within a certain region
+ * of the scanout. Most of the time the safest way to accomplish this is to
+ * simply do said time-sensitive programming in the driver's IRQ handler,
+ * which allows drivers to avoid being preempted during these critical
+ * regions. Or better yet, the hardware may handle applying such
+ * time-critical programming independently of the CPU.
+ *
+ * While a decent amount of hardware is designed so that the CPU doesn't
+ * need to be concerned with extremely time-sensitive programming, there
+ * are a few situations where it can't be helped. Some unforgiving hardware
+ * may require that certain time-sensitive programming be handled entirely
+ * by the CPU, and said programming may even take too long to handle in an
+ * IRQ handler. Another such situation is where the driver must perform a
+ * task that has to complete within a specific scanout period, but might
+ * block, and thus cannot be handled in IRQ context. Neither of these
+ * situations can be solved perfectly in Linux since it is not a realtime
+ * kernel, and the scheduler may cause us to miss our deadline if it
+ * decides to preempt us. But for some drivers, it's good enough if we can
+ * lower our chance of being preempted to an absolute minimum.
+ *
+ * This is where &drm_vblank_work comes in. &drm_vblank_work provides a
+ * simple generic delayed work implementation which delays work execution
+ * until a particular vblank has passed, and then executes the work at
+ * realtime priority. This provides the best possible chance of performing
+ * time-sensitive hardware programming on time, even when the system is
+ * under heavy load. &drm_vblank_work also supports rescheduling, so that
+ * self-rearming work items can be easily implemented.
+ */
+
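+/* Move any pending vblank works whose target vblank count has passed onto
+ * the CRTC's kthread worker, dropping the vblank reference taken when each
+ * work was scheduled. Called from drm_handle_vblank() with event_lock held.
+ */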
+void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
+{
+ struct drm_vblank_work *work, *next;
+ u64 count = atomic64_read(&vblank->count);
+ bool wake = false;
+
+ assert_spin_locked(&vblank->dev->event_lock);
+
+ list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
+ if (!drm_vblank_passed(count, work->count))
+ continue;
+
+ list_del_init(&work->node);
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ kthread_queue_work(vblank->worker, &work->base);
+ wake = true;
+ }
+ if (wake)
+ wake_up_all(&vblank->work_wait_queue);
+}
+
+/* Cancel any pending vblank work items and drop the respective vblank
+ * references in response to vblank interrupts being disabled.
+ */
+void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
+{
+ struct drm_vblank_work *work, *next;
+
+ assert_spin_locked(&vblank->dev->event_lock);
+
+ list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
+ list_del_init(&work->node);
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ }
+
+ wake_up_all(&vblank->work_wait_queue);
+}
+
+/**
+ * drm_vblank_work_schedule - schedule a vblank work
+ * @work: vblank work to schedule
+ * @count: target vblank count
+ * @nextonmiss: defer until the next vblank if target vblank was missed
+ *
+ * Schedule @work for execution once the crtc vblank count reaches @count.
+ *
+ * If the crtc vblank count has already reached @count and @nextonmiss is
+ * %false, the work starts to execute immediately.
+ *
+ * If the crtc vblank count has already reached @count and @nextonmiss is
+ * %true, the work is deferred until the next vblank (as if @count had been
+ * specified as crtc vblank count + 1).
+ *
+ * If @work is already scheduled, this function will reschedule said work
+ * using the new @count. This can be used for self-rearming work items.
+ *
+ * Returns:
+ * %1 if @work was successfully (re)scheduled, %0 if it was either already
+ * scheduled for @count or is being cancelled, or a negative error code on
+ * failure.
+ */
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ struct drm_device *dev = vblank->dev;
+ u64 cur_vbl;
+ unsigned long irqflags;
+ bool passed, inmodeset, rescheduling = false, wake = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+ if (work->cancelling)
+ goto out;
+
+ spin_lock(&dev->vbl_lock);
+ inmodeset = vblank->inmodeset;
+ spin_unlock(&dev->vbl_lock);
+ if (inmodeset)
+ goto out;
+
+ if (list_empty(&work->node)) {
+ ret = drm_vblank_get(dev, vblank->pipe);
+ if (ret < 0)
+ goto out;
+ } else if (work->count == count) {
+ /* Already scheduled w/ same vbl count */
+ goto out;
+ } else {
+ rescheduling = true;
+ }
+
+ work->count = count;
+ cur_vbl = drm_vblank_count(dev, vblank->pipe);
+ passed = drm_vblank_passed(cur_vbl, count);
+ if (passed)
+ drm_dbg_core(dev,
+ "crtc %d vblank %llu already passed (current %llu)\n",
+ vblank->pipe, count, cur_vbl);
+
+ if (!nextonmiss && passed) {
+ drm_vblank_put(dev, vblank->pipe);
+ ret = kthread_queue_work(vblank->worker, &work->base);
+
+ if (rescheduling) {
+ list_del_init(&work->node);
+ wake = true;
+ }
+ } else {
+ if (!rescheduling)
+ list_add_tail(&work->node, &vblank->pending_work);
+ ret = true;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ if (wake)
+ wake_up_all(&vblank->work_wait_queue);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_work_schedule);
+
+/**
+ * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
+ * finish executing
+ * @work: vblank work to cancel
+ *
+ * Cancel an already scheduled vblank work and wait for its
+ * execution to finish.
+ *
+ * On return, @work is guaranteed to no longer be scheduled or running, even
+ * if it's self-arming.
+ *
+ * Returns:
+ * %true if the work was cancelled before it started to execute, %false
+ * otherwise.
+ */
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ struct drm_device *dev = vblank->dev;
+ bool ret = false;
+
+ spin_lock_irq(&dev->event_lock);
+ if (!list_empty(&work->node)) {
+ list_del_init(&work->node);
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ ret = true;
+ }
+
+ work->cancelling++;
+ spin_unlock_irq(&dev->event_lock);
+
+ wake_up_all(&vblank->work_wait_queue);
+
+ if (kthread_cancel_work_sync(&work->base))
+ ret = true;
+
+ spin_lock_irq(&dev->event_lock);
+ work->cancelling--;
+ spin_unlock_irq(&dev->event_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
+
+/**
+ * drm_vblank_work_flush - wait for a scheduled vblank work to finish
+ * executing
+ * @work: vblank work to flush
+ *
+ * Wait until @work has finished executing once.
+ */
+void drm_vblank_work_flush(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ struct drm_device *dev = vblank->dev;
+
+ spin_lock_irq(&dev->event_lock);
+ wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
+ dev->event_lock);
+ spin_unlock_irq(&dev->event_lock);
+
+ kthread_flush_work(&work->base);
+}
+EXPORT_SYMBOL(drm_vblank_work_flush);
+
+/**
+ * drm_vblank_work_init - initialize a vblank work item
+ * @work: vblank work item
+ * @crtc: CRTC whose vblank will trigger the work execution
+ * @func: work function to be executed
+ *
+ * Initialize a vblank work item for a specific crtc.
+ */
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct kthread_work *work))
+{
+ kthread_init_work(&work->base, func);
+ INIT_LIST_HEAD(&work->node);
+ work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+}
+EXPORT_SYMBOL(drm_vblank_work_init);
+
+int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
+{
+ struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO - 1,
+ };
+ struct kthread_worker *worker;
+
+ INIT_LIST_HEAD(&vblank->pending_work);
+ init_waitqueue_head(&vblank->work_wait_queue);
+ worker = kthread_create_worker(0, "card%d-crtc%d",
+ vblank->dev->primary->index,
+ vblank->pipe);
+ if (IS_ERR(worker))
+ return PTR_ERR(worker);
+
+ vblank->worker = worker;
+
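+ /* Run the worker at the highest SCHED_FIFO priority so that queued
+ * vblank works preempt normal tasks as reliably as possible.
+ */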
+ return sched_setscheduler(vblank->worker->task, SCHED_FIFO, &param);
+}
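
Taken together, the API above is meant to be used roughly as follows. This
is a minimal sketch, not part of the patch: struct my_crtc, my_program_hw()
and the three hooks are hypothetical driver code, and error handling is
omitted.

    #include <drm/drm_crtc.h>
    #include <drm/drm_vblank.h>
    #include <drm/drm_vblank_work.h>

    struct my_crtc {
            struct drm_crtc base;
            struct drm_vblank_work flip_work;
    };

    /* Hypothetical time-critical register programming. */
    static void my_program_hw(struct my_crtc *mc)
    {
    }

    /* Runs on the CRTC's vblank worker at realtime priority, outside IRQ
     * context, once the target vblank has passed. May block. */
    static void my_flip_work_fn(struct kthread_work *base)
    {
            struct drm_vblank_work *work = to_drm_vblank_work(base);
            struct my_crtc *mc = container_of(work, struct my_crtc,
                                              flip_work);

            my_program_hw(mc);
    }

    /* Called once when the CRTC is created. */
    static void my_crtc_init_work(struct my_crtc *mc)
    {
            drm_vblank_work_init(&mc->flip_work, &mc->base,
                                 my_flip_work_fn);
    }

    static void my_crtc_queue_flip(struct my_crtc *mc)
    {
            u64 count = drm_crtc_vblank_count(&mc->base);

            /* Run after the next vblank; if that vblank is missed,
             * defer to the one after it instead of running late. */
            drm_vblank_work_schedule(&mc->flip_work, count + 1, true);
    }

    static void my_crtc_disable(struct my_crtc *mc)
    {
            /* Ensure the work is neither scheduled nor running before
             * the CRTC is shut down. */
            drm_vblank_work_cancel_sync(&mc->flip_work);
            drm_crtc_vblank_off(&mc->base);
    }

Note that drm_vblank_work_schedule() takes and releases its own vblank
reference internally, so the driver does not need a surrounding
drm_crtc_vblank_get()/drm_crtc_vblank_put() pair just for the work item.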
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd9f5b9e56e4..dd125f8c766c 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -27,12 +27,14 @@
#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/poll.h>
+#include <linux/kthread.h>
#include <drm/drm_file.h>
#include <drm/drm_modes.h>
struct drm_device;
struct drm_crtc;
+struct drm_vblank_work;
/**
* struct drm_pending_vblank_event - pending vblank event tracking
@@ -203,6 +205,24 @@ struct drm_vblank_crtc {
* disabling functions multiple times.
*/
bool enabled;
+
+ /**
+ * @worker: The &kthread_worker used for executing vblank works.
+ */
+ struct kthread_worker *worker;
+
+ /**
+ * @pending_work: A list of scheduled &drm_vblank_work items that are
+ * waiting for a future vblank.
+ */
+ struct list_head pending_work;
+
+ /**
+ * @work_wait_queue: The wait queue used for signaling that a
+ * &drm_vblank_work item has either finished executing, or was
+ * cancelled.
+ */
+ wait_queue_head_t work_wait_queue;
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h
new file mode 100644
index 000000000000..eb41d0810c4f
--- /dev/null
+++ b/include/drm/drm_vblank_work.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_VBLANK_WORK_H_
+#define _DRM_VBLANK_WORK_H_
+
+#include <linux/kthread.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_vblank_work - A work item whose execution is delayed until a
+ * target vblank passes, and which then executes at realtime priority
+ * outside of IRQ context.
+ *
+ * See also:
+ * drm_vblank_work_schedule()
+ * drm_vblank_work_init()
+ * drm_vblank_work_cancel_sync()
+ * drm_vblank_work_flush()
+ */
+struct drm_vblank_work {
+ /**
+ * @base: The base &kthread_work item which will be executed by
+ * &drm_vblank_crtc.worker. Drivers should not interact with this
+ * directly, and instead rely on drm_vblank_work_init() to initialize
+ * this.
+ */
+ struct kthread_work base;
+
+ /**
+ * @vblank: A pointer to &drm_vblank_crtc this work item belongs to.
+ */
+ struct drm_vblank_crtc *vblank;
+
+ /**
+ * @count: The target vblank this work will execute on. Drivers should
+ * not modify this value directly, and instead use
+ * drm_vblank_work_schedule().
+ */
+ u64 count;
+
+ /**
+ * @cancelling: The number of drm_vblank_work_cancel_sync() calls that
+ * are currently running. A work item cannot be rescheduled until all
+ * calls have finished.
+ */
+ int cancelling;
+
+ /**
+ * @node: The position of this work item in
+ * &drm_vblank_crtc.pending_work.
+ */
+ struct list_head node;
+};
+
+/**
+ * to_drm_vblank_work - Retrieve the respective &drm_vblank_work item from a
+ * &kthread_work
+ * @_work: The &kthread_work embedded inside a &drm_vblank_work
+ */
+#define to_drm_vblank_work(_work) \
+ container_of((_work), struct drm_vblank_work, base)
+
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss);
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct kthread_work *work));
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
+void drm_vblank_work_flush(struct drm_vblank_work *work);
+
+#endif /* !_DRM_VBLANK_WORK_H_ */
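
Finally, since drm_vblank_work_schedule() may be called from within the
work function itself, the self-rearming pattern mentioned in the overview
can be sketched like this; my_animation_step() and my_animation_done() are,
again, hypothetical stand-ins for driver code:

    #include <drm/drm_vblank_work.h>

    static bool my_animation_done(void)
    {
            return false; /* hypothetical driver-specific condition */
    }

    static void my_animation_step(void)
    {
            /* hypothetical per-frame state update */
    }

    static void my_anim_work_fn(struct kthread_work *base)
    {
            struct drm_vblank_work *work = to_drm_vblank_work(base);

            my_animation_step();

            /* work->count still holds the vblank this invocation was
             * scheduled for, so re-arm for the following one. */
            if (!my_animation_done())
                    drm_vblank_work_schedule(work, work->count + 1, true);
    }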