Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 87d1966192f4..70d6c4329778 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
+#include <trace/events/dma_fence.h>
#include "virtgpu_drv.h"
static const char *virtio_get_driver_name(struct dma_fence *f)
@@ -40,16 +41,14 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
- if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
+ if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
}
static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
- snprintf(str, size, "%llu", fence->seq);
+ snprintf(str, size, "%llu", f->seqno);
}
static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
@@ -71,17 +70,22 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fence)
return fence;
fence->drv = drv;
+
+ /* This only partially initializes the fence because the seqno is
+ * unknown yet. The fence must not be used outside of the driver
+ * until virtio_gpu_fence_emit is called.
+ */
dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
return fence;
}
-int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence)
{
@@ -89,14 +93,15 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->seq = ++drv->sync_seq;
+ fence->f.seqno = ++drv->sync_seq;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
+ trace_dma_fence_emit(&fence->f);
+
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64(fence->seq);
- return 0;
+ cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -109,7 +114,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (last_seq < fence->seq)
+ if (last_seq < fence->f.seqno)
continue;
dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
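
For context, a minimal sketch of how a submission path is expected to use the reworked helpers after this change. Only virtio_gpu_fence_alloc(), virtio_gpu_fence_emit() and the VIRTIO_GPU_FLAG_FENCE/fence_id handling come from the patch; the caller name, the error handling and the queueing placeholder below are illustrative assumptions, not code from the driver.

/*
 * Illustrative caller, not part of the patch: a hypothetical helper
 * showing the intended usage of the reworked fence API.
 */
#include "virtgpu_drv.h"

static int example_submit_with_fence(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_ctrl_hdr *cmd_hdr)
{
	struct virtio_gpu_fence *fence;

	/* Allocation now uses GFP_KERNEL and may sleep. */
	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;

	/*
	 * The fence is only partially initialized at this point; its
	 * seqno is assigned inside virtio_gpu_fence_emit(), which also
	 * sets VIRTIO_GPU_FLAG_FENCE and fence_id in the command header.
	 * The call can no longer fail, so there is no return value to check.
	 */
	virtio_gpu_fence_emit(vgdev, cmd_hdr, fence);

	/* ... queue the command on the virtqueue, then drop our reference. */
	dma_fence_put(&fence->f);
	return 0;
}

Because the allocation switches from GFP_ATOMIC to GFP_KERNEL, such a helper must only be called from process context where sleeping is allowed.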