Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_vq.c	306
1 file changed, 223 insertions, 83 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6bc2008b0d0d..74ad3bc3ebe8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -26,12 +26,14 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
+#include <linux/dma-mapping.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 
+#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
+
 #define MAX_INLINE_CMD_SIZE 96
 #define MAX_INLINE_RESP_SIZE 24
 #define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
@@ -153,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
 {
 	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 		kfree(vbuf->resp_buf);
-	kfree(vbuf->data_buf);
+	kvfree(vbuf->data_buf);
 	kmem_cache_free(vgdev->vbufs, vbuf);
 }
 
@@ -190,8 +192,11 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 	spin_unlock(&vgdev->ctrlq.qlock);
 
-	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+	list_for_each_entry(entry, &reclaim_list, list) {
 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
 				struct virtio_gpu_ctrl_hdr *cmd;
@@ -214,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		}
 		if (entry->resp_cb)
 			entry->resp_cb(vgdev, entry);
-
-		list_del(&entry->list);
-		free_vbuf(vgdev, entry);
 	}
 	wake_up(&vgdev->ctrlq.ack_queue);
 
 	if (fence_id)
 		virtio_gpu_fence_event_process(vgdev, fence_id);
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		if (entry->objs)
+			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+		list_del(&entry->list);
+		free_vbuf(vgdev, entry);
+	}
 }
 
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -247,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf)
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+	int ret, s, i;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	struct page *pg;
+
+	if (WARN_ON(!PAGE_ALIGNED(data)))
+		return NULL;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return NULL;
+	}
+
+	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+		pg = vmalloc_to_page(data);
+		if (!pg) {
+			sg_free_table(sgt);
+			kfree(sgt);
+			return NULL;
+		}
+
+		s = min_t(int, PAGE_SIZE, size);
+		sg_set_page(sg, pg, s, 0);
+
+		size -= s;
+		data += s;
+	}
+
+	return sgt;
+}
+
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf,
+						struct scatterlist *vout)
 		__releases(&vgdev->ctrlq.qlock)
 		__acquires(&vgdev->ctrlq.qlock)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *sgs[3], vcmd, vout, vresp;
+	struct scatterlist *sgs[3], vcmd, vresp;
 	int outcnt = 0, incnt = 0;
+	bool notify = false;
 	int ret;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return notify;
 
 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 	sgs[outcnt + incnt] = &vcmd;
 	outcnt++;
 
-	if (vbuf->data_size) {
-		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
-		sgs[outcnt + incnt] = &vout;
+	if (vout) {
+		sgs[outcnt + incnt] = vout;
 		outcnt++;
 	}
@@ -284,32 +334,38 @@ retry:
 		spin_lock(&vgdev->ctrlq.qlock);
 		goto retry;
 	} else {
-		virtqueue_kick(vq);
-	}
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
-}
+		trace_virtio_gpu_cmd_queue(vq,
+			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_vbuffer *vbuf)
-{
-	int rc;
-
-	spin_lock(&vgdev->ctrlq.qlock);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
-	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
+		notify = virtqueue_kick_prepare(vq);
+	}
+	return notify;
 }
 
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf,
-					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf,
+						struct virtio_gpu_ctrl_hdr *hdr,
+						struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	int rc;
+	struct scatterlist *vout = NULL, sg;
+	struct sg_table *sgt = NULL;
+	bool notify;
+	int outcnt = 0;
+
+	if (vbuf->data_size) {
+		if (is_vmalloc_addr(vbuf->data_buf)) {
+			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+					     &outcnt);
+			if (!sgt)
+				return;
+			vout = sgt->sgl;
+		} else {
+			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
+			vout = &sg;
+			outcnt = 1;
+		}
+	}
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -322,29 +378,47 @@ again:
 	 * to wait for free space, which can result in fence ids being
 	 * submitted out-of-order.
 	 */
-	if (vq->num_free < 3) {
+	if (vq->num_free < 2 + outcnt) {
 		spin_unlock(&vgdev->ctrlq.qlock);
 		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
 		goto again;
 	}
 
-	if (fence)
+	if (hdr && fence) {
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+		if (vbuf->objs) {
+			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		}
+	}
+	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
+
+	if (sgt) {
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
+}
+
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					 struct virtio_gpu_vbuffer *vbuf)
+{
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
 }
 
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
-				   struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	bool notify;
 	int ret;
 	int outcnt;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return;
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -359,14 +433,16 @@ retry:
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
-		virtqueue_kick(vq);
+		trace_virtio_gpu_cmd_queue(vq,
+			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
+		notify = virtqueue_kick_prepare(vq);
 	}
 
 	spin_unlock(&vgdev->cursorq.qlock);
 
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
+	if (notify)
+		virtqueue_notify(vq);
 }
 
 /* just create gem objects for userspace and long lived objects,
@@ -376,23 +452,24 @@ retry:
 /* create a basic resource */
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_object *bo,
-				    uint32_t format,
-				    uint32_t width,
-				    uint32_t height)
+				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_object_array *objs,
+				    struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
-	cmd_p->format = cpu_to_le32(format);
-	cmd_p->width = cpu_to_le32(width);
-	cmd_p->height = cpu_to_le32(height);
+	cmd_p->format = cpu_to_le32(params->format);
+	cmd_p->width = cpu_to_le32(params->width);
+	cmd_p->height = cpu_to_le32(params->height);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 	bo->created = true;
 }
@@ -471,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint64_t offset,
-					__le32 width, __le32 height,
-					__le32 x, __le32 y,
+					uint32_t width, uint32_t height,
+					uint32_t x, uint32_t y,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -488,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->offset = cpu_to_le64(offset);
-	cmd_p->r.width = width;
-	cmd_p->r.height = height;
-	cmd_p->r.x = x;
-	cmd_p->r.y = y;
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
@@ -584,12 +663,14 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 			memcpy(cache_ent->caps_cache, resp->capset_data,
 			       cache_ent->size);
+			/* Copy must occur before is_valid is signalled. */
+			smp_wmb();
 			atomic_set(&cache_ent->is_valid, 1);
 			break;
 		}
 	}
 	spin_unlock(&vgdev->display_info_lock);
-	wake_up(&vgdev->resp_wq);
+	wake_up_all(&vgdev->resp_wq);
 }
 
 static int virtio_get_edid_block(void *data, u8 *buf,
@@ -620,11 +701,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 	output = vgdev->outputs + scanout;
 
 	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+	drm_connector_update_edid_property(&output->conn, new_edid);
 
 	spin_lock(&vgdev->display_info_lock);
 	old_edid = output->edid;
 	output->edid = new_edid;
-	drm_connector_update_edid_property(&output->conn, output->edid);
 	spin_unlock(&vgdev->display_info_lock);
 
 	kfree(old_edid);
@@ -685,8 +766,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_vbuffer *vbuf;
 	int max_size;
 	struct virtio_gpu_drv_cap_cache *cache_ent;
+	struct virtio_gpu_drv_cap_cache *search_ent;
 	void *resp_buf;
 
+	*cache_p = NULL;
+
 	if (idx >= vgdev->num_capsets)
 		return -EINVAL;
@@ -717,9 +801,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	atomic_set(&cache_ent->is_valid, 0);
 	cache_ent->size = max_size;
 	spin_lock(&vgdev->display_info_lock);
-	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+	/* Search while under lock in case it was added by another task. */
+	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
+		if (search_ent->id == vgdev->capsets[idx].id &&
+		    search_ent->version == version) {
+			*cache_p = search_ent;
+			break;
+		}
+	}
+	if (!*cache_p)
+		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 	spin_unlock(&vgdev->display_info_lock);
 
+	if (*cache_p) {
+		/* Entry was found, so free everything that was just created. */
+		kfree(resp_buf);
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+		return 0;
+	}
+
 	cmd_p = virtio_gpu_alloc_cmd_resp
 		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
 		 sizeof(struct virtio_gpu_resp_capset) + max_size,
 		 resp_buf);
@@ -794,63 +895,81 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_ctx_resource *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_ctx_resource *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
 void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				       struct virtio_gpu_object *bo,
-				       struct virtio_gpu_resource_create_3d *rc_3d)
+				       struct virtio_gpu_object_params *params,
+				       struct virtio_gpu_object_array *objs,
+				       struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
-	*cmd_p = *rc_3d;
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
-	cmd_p->hdr.flags = 0;
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->format = cpu_to_le32(params->format);
+	cmd_p->width = cpu_to_le32(params->width);
+	cmd_p->height = cpu_to_le32(params->height);
+
+	cmd_p->target = cpu_to_le32(params->target);
+	cmd_p->bind = cpu_to_le32(params->bind);
+	cmd_p->depth = cpu_to_le32(params->depth);
+	cmd_p->array_size = cpu_to_le32(params->array_size);
+	cmd_p->last_level = cpu_to_le32(params->last_level);
+	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+	cmd_p->flags = cpu_to_le32(params->flags);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 	bo->created = true;
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -863,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -874,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
-					  uint32_t resource_id, uint32_t ctx_id,
+					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->box = *box;
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
@@ -897,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -907,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 	vbuf->data_buf = data;
 	vbuf->data_size = data_size;
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -922,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
-	int si, nents;
+	int si, nents, ret;
 
-	if (!obj->created)
-		return 0;
+	if (WARN_ON_ONCE(!obj->created))
+		return -EINVAL;
+	if (WARN_ON_ONCE(obj->pages))
+		return -EINVAL;
 
-	if (!obj->pages) {
-		int ret;
+	ret = drm_gem_shmem_pin(&obj->base.base);
+	if (ret < 0)
+		return -EINVAL;
 
-		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
-		if (ret)
-			return ret;
+	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+	if (obj->pages == NULL) {
+		drm_gem_shmem_unpin(&obj->base.base);
+		return -EINVAL;
 	}
 
 	if (use_dma_api) {
@@ -971,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
+	if (WARN_ON_ONCE(!obj->pages))
+		return;
+
 	if (use_dma_api && obj->mapped) {
 		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 		/* detach backing and wait for the host process it ... */
@@ -986,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 	} else {
 		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
 	}
+
+	sg_free_table(obj->pages);
+	obj->pages = NULL;
+
+	drm_gem_shmem_unpin(&obj->base.base);
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
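
Note on the queueing rework above: virtqueue_kick() under the queue spinlock is replaced by virtqueue_kick_prepare() under the lock plus virtqueue_notify() after the lock is dropped, so the potentially expensive host notification (often a VM exit) no longer happens with the lock held. A minimal sketch of the pattern follows; only the virtqueue_* and spinlock calls are the real kernel API, the my_vq_ctx/my_queue_buf names are hypothetical:

#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_vq_ctx {			/* hypothetical driver context */
	struct virtqueue *vq;
	spinlock_t lock;
};

static int my_queue_buf(struct my_vq_ctx *c, struct scatterlist *sg, void *data)
{
	bool notify;
	int ret;

	spin_lock(&c->lock);
	/* Cheap work under the lock: add the buffer and compute whether
	 * the host actually needs a notification. */
	ret = virtqueue_add_outbuf(c->vq, sg, 1, data, GFP_ATOMIC);
	notify = virtqueue_kick_prepare(c->vq);
	spin_unlock(&c->lock);

	/* Expensive work outside the lock: the doorbell write may trap to
	 * the hypervisor, and other CPUs can queue buffers meanwhile. */
	if (!ret && notify)
		virtqueue_notify(c->vq);
	return ret;
}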
