Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c  218
1 file changed, 71 insertions, 147 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index cc02fc4bab2a..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -120,23 +120,6 @@ virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_vbuffer **vbuffer_p,
-				  int size)
-{
-	struct virtio_gpu_vbuffer *vbuf;
-
-	vbuf = virtio_gpu_get_vbuf(vgdev, size,
-				   sizeof(struct virtio_gpu_ctrl_hdr),
-				   NULL, NULL);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
-	*vbuffer_p = vbuf;
-	return vbuf->buf;
-}
-
 static struct virtio_gpu_update_cursor*
 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 			struct virtio_gpu_vbuffer **vbuffer_p)
@@ -172,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_command *)vbuf->buf;
 }
 
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer **vbuffer_p,
+				     int size,
+				     virtio_gpu_resp_cb cb)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
 		      struct virtio_gpu_vbuffer *vbuf)
 {
@@ -220,7 +222,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
-			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 				struct virtio_gpu_ctrl_hdr *cmd;
 				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
 				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
@@ -327,8 +329,14 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 				      int incnt)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	bool notify = false;
-	int ret;
+	int ret, idx;
+
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		if (fence && vbuf->objs)
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		free_vbuf(vgdev, vbuf);
+		return;
+	}
 
 	if (vgdev->has_indirect)
 		elemcnt = 1;
@@ -336,16 +344,9 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
 
-	if (!vgdev->vqs_ready) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-
-		if (fence && vbuf->objs)
-			virtio_gpu_array_unlock_resv(vbuf->objs);
-		return;
-	}
-
 	if (vq->num_free < elemcnt) {
 		spin_unlock(&vgdev->ctrlq.qlock);
+		virtio_gpu_notify(vgdev);
 		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
 		goto again;
 	}
@@ -367,16 +368,11 @@ again:
 
 	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
 
-	notify = virtqueue_kick_prepare(vq);
+	atomic_inc(&vgdev->pending_commands);
 
 	spin_unlock(&vgdev->ctrlq.qlock);
 
-	if (notify) {
-		if (vgdev->disable_notify)
-			vgdev->pending_notify = true;
-		else
-			virtqueue_notify(vq);
-	}
+	drm_dev_exit(idx);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -432,19 +428,20 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	}
 }
 
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
-{
-	vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
-	vgdev->disable_notify = false;
+	bool notify;
 
-	if (!vgdev->pending_notify)
+	if (!atomic_read(&vgdev->pending_commands))
 		return;
-	vgdev->pending_notify = false;
-	virtqueue_notify(vgdev->ctrlq.vq);
+
+	spin_lock(&vgdev->ctrlq.qlock);
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -458,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	int idx, ret, outcnt;
 	bool notify;
-	int ret;
-	int outcnt;
 
-	if (!vgdev->vqs_ready)
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		free_vbuf(vgdev, vbuf);
 		return;
+	}
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -488,6 +486,8 @@ retry:
 
 	if (notify)
 		virtqueue_notify(vq);
+
+	drm_dev_exit(idx);
 }
 
 /* just create gem objects for userspace and long lived objects,
@@ -518,35 +518,32 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	bo->created = true;
 }
 
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
-	struct virtio_gpu_resource_unref *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
+	struct virtio_gpu_object *bo;
 
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	bo = vbuf->resp_cb_data;
+	vbuf->resp_cb_data = NULL;
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_cleanup_object(bo);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id,
-						  struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+				   struct virtio_gpu_object *bo)
 {
-	struct virtio_gpu_resource_detach_backing *cmd_p;
+	struct virtio_gpu_resource_unref *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+					virtio_gpu_cmd_unref_cb);
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+	vbuf->resp_cb_data = bo;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -603,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -954,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -1004,6 +1001,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->flags = cpu_to_le32(params->flags);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
 	bo->created = true;
 }
 
@@ -1018,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1090,89 +1089,14 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence)
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents, ret;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-	if (WARN_ON_ONCE(obj->pages))
-		return -EINVAL;
-
-	ret = drm_gem_shmem_pin(&obj->base.base);
-	if (ret < 0)
-		return -EINVAL;
-
-	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
-	if (obj->pages == NULL) {
-		drm_gem_shmem_unpin(&obj->base.base);
-		return -EINVAL;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
-					       fence);
+					       ents, nents, NULL);
 	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output)
 {
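For readers skimming the patch, the central change is the notification rework: virtio_gpu_queue_ctrl_sgs() no longer kicks the host per command but only bumps vgdev->pending_commands, and the new virtio_gpu_notify() flushes a whole batch with at most one doorbell write. Below is a minimal sketch of that deferred-kick pattern; struct demo_queue and the demo_* names are illustrative stand-ins rather than the driver's real types, and only the virtqueue_* calls are the actual virtio API:

/*
 * Sketch of the deferred-kick pattern introduced above (simplified,
 * assumed types; only the virtqueue_* functions are real virtio API).
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct demo_queue {
	struct virtqueue *vq;
	spinlock_t qlock;
	atomic_t pending_commands;
};

/* Per command: queue it and note that a kick is owed, but don't notify. */
static void demo_queue_cmd(struct demo_queue *q /*, sgs, ... */)
{
	spin_lock(&q->qlock);
	/* virtqueue_add_sgs(q->vq, ...) would go here */
	atomic_inc(&q->pending_commands);
	spin_unlock(&q->qlock);
}

/* Per batch: clear the counter and ring the doorbell at most once. */
static void demo_notify(struct demo_queue *q)
{
	bool notify;

	if (!atomic_read(&q->pending_commands))
		return;

	spin_lock(&q->qlock);
	atomic_set(&q->pending_commands, 0);
	notify = virtqueue_kick_prepare(q->vq);
	spin_unlock(&q->qlock);

	if (notify)
		virtqueue_notify(q->vq);
}

The point of the split is that virtqueue_notify() typically traps to the hypervisor, so folding several queued commands into one kick amortizes a VM exit across the batch, while calling virtqueue_kick_prepare() under the same lock that guards buffer submission keeps the ring's event bookkeeping consistent.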
