Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_ioctl.c')
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ioctl.c   367
1 file changed, 120 insertions, 247 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 14ce8188c052..9af1ec62434f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -25,11 +25,12 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
-#include <drm/virtgpu_drm.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/file.h>
 #include <linux/sync_file.h>
 
+#include <drm/drm_file.h>
+#include <drm/virtgpu_drm.h>
+
 #include "virtgpu_drv.h"
 
 static void convert_to_hw_box(struct virtio_gpu_box *dst,
@@ -54,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 					 &virtio_gpu_map->offset);
 }
 
-static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
-					   struct list_head *head)
-{
-	struct ttm_operation_ctx ctx = { false, false };
-	struct ttm_validate_buffer *buf;
-	struct ttm_buffer_object *bo;
-	struct virtio_gpu_object *qobj;
-	int ret;
-
-	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
-	if (ret != 0)
-		return ret;
-
-	list_for_each_entry(buf, head, head) {
-		bo = buf->bo;
-		qobj = container_of(bo, struct virtio_gpu_object, tbo);
-		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
-		if (ret) {
-			ttm_eu_backoff_reservation(ticket, head);
-			return ret;
-		}
-	}
-	return 0;
-}
-
-static void virtio_gpu_unref_list(struct list_head *head)
-{
-	struct ttm_validate_buffer *buf;
-	struct ttm_buffer_object *bo;
-	struct virtio_gpu_object *qobj;
-
-	list_for_each_entry(buf, head, head) {
-		bo = buf->bo;
-		qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
-		drm_gem_object_put_unlocked(&qobj->gem_base);
-	}
-}
-
 /*
  * Usage of execbuffer:
  * Relocations need to take into account the full VIRTIO_GPUDrawable size.
@@ -105,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	struct drm_virtgpu_execbuffer *exbuf = data;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-	struct drm_gem_object *gobj;
 	struct virtio_gpu_fence *out_fence;
-	struct virtio_gpu_object *qobj;
 	int ret;
 	uint32_t *bo_handles = NULL;
 	void __user *user_bo_handles = NULL;
-	struct list_head validate_list;
-	struct ttm_validate_buffer *buflist = NULL;
-	int i;
-	struct ww_acquire_ctx ticket;
+	struct virtio_gpu_object_array *buflist = NULL;
 	struct sync_file *sync_file;
 	int in_fence_fd = exbuf->fence_fd;
 	int out_fence_fd = -1;
@@ -155,48 +112,38 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		return out_fence_fd;
 	}
 
-	INIT_LIST_HEAD(&validate_list);
 	if (exbuf->num_bo_handles) {
-
 		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
-					   sizeof(uint32_t), GFP_KERNEL);
-		buflist = kvmalloc_array(exbuf->num_bo_handles,
-					 sizeof(struct ttm_validate_buffer),
-					 GFP_KERNEL | __GFP_ZERO);
-		if (!bo_handles || !buflist) {
+					    sizeof(uint32_t), GFP_KERNEL);
+		if (!bo_handles) {
 			ret = -ENOMEM;
 			goto out_unused_fd;
 		}
 
-		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
 		if (copy_from_user(bo_handles, user_bo_handles,
 				   exbuf->num_bo_handles * sizeof(uint32_t))) {
 			ret = -EFAULT;
 			goto out_unused_fd;
 		}
 
-		for (i = 0; i < exbuf->num_bo_handles; i++) {
-			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
-			if (!gobj) {
-				ret = -ENOENT;
-				goto out_unused_fd;
-			}
-
-			qobj = gem_to_virtio_gpu_obj(gobj);
-			buflist[i].bo = &qobj->tbo;
-
-			list_add(&buflist[i].head, &validate_list);
+		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+							exbuf->num_bo_handles);
+		if (!buflist) {
+			ret = -ENOENT;
+			goto out_unused_fd;
 		}
 		kvfree(bo_handles);
 		bo_handles = NULL;
 	}
 
-	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-	if (ret)
-		goto out_free;
+	if (buflist) {
+		ret = virtio_gpu_array_lock_resv(buflist);
+		if (ret)
+			goto out_unused_fd;
+	}
 
-	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
-			  exbuf->size);
+	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
 		goto out_unresv;
@@ -221,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	}
 
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-			      vfpriv->ctx_id, out_fence);
-
-	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-
-	/* fence the command bo */
-	virtio_gpu_unref_list(&validate_list);
-	kvfree(buflist);
+			      vfpriv->ctx_id, buflist, out_fence);
 	return 0;
 
 out_memdup:
-	kfree(buf);
+	kvfree(buf);
 out_unresv:
-	ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
-	virtio_gpu_unref_list(&validate_list);
+	if (buflist)
+		virtio_gpu_array_unlock_resv(buflist);
 out_unused_fd:
 	kvfree(bo_handles);
-	kvfree(buflist);
+	if (buflist)
+		virtio_gpu_array_put_free(buflist);
 	if (out_fence_fd >= 0)
 		put_unused_fd(out_fence_fd);
 
@@ -263,10 +204,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	default:
 		return -EINVAL;
 	}
-	if (copy_to_user((void __user *)(unsigned long)param->value,
-			 &value, sizeof(int))) {
+	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
 		return -EFAULT;
-	}
+
 	return 0;
 }
 
@@ -275,16 +215,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_resource_create *rc = data;
+	struct virtio_gpu_fence *fence;
 	int ret;
 	struct virtio_gpu_object *qobj;
 	struct drm_gem_object *obj;
 	uint32_t handle = 0;
-	uint32_t size;
-	struct list_head validate_list;
-	struct ttm_validate_buffer mainbuf;
-	struct virtio_gpu_fence *fence = NULL;
-	struct ww_acquire_ctx ticket;
-	struct virtio_gpu_resource_create_3d rc_3d;
+	struct virtio_gpu_object_params params = { 0 };
 
 	if (vgdev->has_virgl_3d == false) {
 		if (rc->depth > 1)
@@ -299,94 +235,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 			return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&validate_list);
-	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
-	size = rc->size;
-
+	params.format = rc->format;
+	params.width = rc->width;
+	params.height = rc->height;
+	params.size = rc->size;
+	if (vgdev->has_virgl_3d) {
+		params.virgl = true;
+		params.target = rc->target;
+		params.bind = rc->bind;
+		params.depth = rc->depth;
+		params.array_size = rc->array_size;
+		params.last_level = rc->last_level;
+		params.nr_samples = rc->nr_samples;
+		params.flags = rc->flags;
+	}
 	/* allocate a single page size object */
-	if (size == 0)
-		size = PAGE_SIZE;
-
-	qobj = virtio_gpu_alloc_object(dev, size, false, false);
-	if (IS_ERR(qobj))
-		return PTR_ERR(qobj);
-	obj = &qobj->gem_base;
-
-	if (!vgdev->has_virgl_3d) {
-		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
-					       rc->width, rc->height);
-
-		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
-	} else {
-		/* use a gem reference since unref list undoes them */
-		drm_gem_object_get(&qobj->gem_base);
-		mainbuf.bo = &qobj->tbo;
-		list_add(&mainbuf.head, &validate_list);
-
-		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-		if (ret) {
-			DRM_DEBUG("failed to validate\n");
-			goto fail_unref;
-		}
+	if (params.size == 0)
+		params.size = PAGE_SIZE;
 
-		rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
-		rc_3d.target = cpu_to_le32(rc->target);
-		rc_3d.format = cpu_to_le32(rc->format);
-		rc_3d.bind = cpu_to_le32(rc->bind);
-		rc_3d.width = cpu_to_le32(rc->width);
-		rc_3d.height = cpu_to_le32(rc->height);
-		rc_3d.depth = cpu_to_le32(rc->depth);
-		rc_3d.array_size = cpu_to_le32(rc->array_size);
-		rc_3d.last_level = cpu_to_le32(rc->last_level);
-		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
-		rc_3d.flags = cpu_to_le32(rc->flags);
-
-		fence = virtio_gpu_fence_alloc(vgdev);
-		if (!fence) {
-			ret = -ENOMEM;
-			goto fail_backoff;
-		}
-
-		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
-		ret = virtio_gpu_object_attach(vgdev, qobj, fence);
-		if (ret) {
-			dma_fence_put(&fence->f);
-			goto fail_backoff;
-		}
-		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
-	}
+	fence = virtio_gpu_fence_alloc(vgdev);
+	if (!fence)
+		return -ENOMEM;
+	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
+	dma_fence_put(&fence->f);
+	if (ret < 0)
+		return ret;
+	obj = &qobj->base.base;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
 	if (ret) {
-
 		drm_gem_object_release(obj);
-		if (vgdev->has_virgl_3d) {
-			virtio_gpu_unref_list(&validate_list);
-			dma_fence_put(&fence->f);
-		}
 		return ret;
 	}
 	drm_gem_object_put_unlocked(obj);
 
 	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
 	rc->bo_handle = handle;
-
-	if (vgdev->has_virgl_3d) {
-		virtio_gpu_unref_list(&validate_list);
-		dma_fence_put(&fence->f);
-	}
 	return 0;
-fail_backoff:
-	ttm_eu_backoff_reservation(&ticket, &validate_list);
-fail_unref:
-	if (vgdev->has_virgl_3d) {
-		virtio_gpu_unref_list(&validate_list);
-		dma_fence_put(&fence->f);
-	}
-//fail_obj:
-//	drm_gem_object_handle_unreference_unlocked(obj);
-	return ret;
 }
 
 static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
@@ -402,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
 
 	qobj = gem_to_virtio_gpu_obj(gobj);
 
-	ri->size = qobj->gem_base.size;
+	ri->size = qobj->base.base.size;
 	ri->res_handle = qobj->hw_res_handle;
 	drm_gem_object_put_unlocked(gobj);
 	return 0;
@@ -415,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_from_host *args = data;
-	struct ttm_operation_ctx ctx = { true, false };
-	struct drm_gem_object *gobj = NULL;
-	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	int ret;
 	u32 offset = args->offset;
@@ -426,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
 
-	gobj = drm_gem_object_lookup(file, args->bo_handle);
-	if (gobj == NULL)
+	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+	if (objs == NULL)
 		return -ENOENT;
 
-	qobj = gem_to_virtio_gpu_obj(gobj);
-
-	ret = virtio_gpu_object_reserve(qobj, false);
-	if (ret)
-		goto out;
-
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
-	if (unlikely(ret))
-		goto out_unres;
+	ret = virtio_gpu_array_lock_resv(objs);
+	if (ret != 0)
+		goto err_put_free;
 
 	convert_to_hw_box(&box, &args->box);
 
 	fence = virtio_gpu_fence_alloc(vgdev);
 	if (!fence) {
 		ret = -ENOMEM;
-		goto out_unres;
+		goto err_unlock;
 	}
 
 	virtio_gpu_cmd_transfer_from_host_3d
-		(vgdev, qobj->hw_res_handle,
-		 vfpriv->ctx_id, offset, args->level,
-		 &box, fence);
-	reservation_object_add_excl_fence(qobj->tbo.resv,
-					  &fence->f);
-
+		(vgdev, vfpriv->ctx_id, offset, args->level,
+		 &box, objs, fence);
 	dma_fence_put(&fence->f);
-out_unres:
-	virtio_gpu_object_unreserve(qobj);
-out:
-	drm_gem_object_put_unlocked(gobj);
+	return 0;
+
+err_unlock:
+	virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+	virtio_gpu_array_put_free(objs);
 	return ret;
 }
 
@@ -468,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
-	struct ttm_operation_ctx ctx = { true, false };
-	struct drm_gem_object *gobj = NULL;
-	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	struct virtio_gpu_box box;
 	int ret;
 	u32 offset = args->offset;
 
-	gobj = drm_gem_object_lookup(file, args->bo_handle);
-	if (gobj == NULL)
+	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+	if (objs == NULL)
 		return -ENOENT;
 
-	qobj = gem_to_virtio_gpu_obj(gobj);
-
-	ret = virtio_gpu_object_reserve(qobj, false);
-	if (ret)
-		goto out;
-
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
-	if (unlikely(ret))
-		goto out_unres;
-
 	convert_to_hw_box(&box, &args->box);
 	if (!vgdev->has_virgl_3d) {
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, qobj, offset,
-			 box.w, box.h, box.x, box.y, NULL);
+			(vgdev, offset,
+			 box.w, box.h, box.x, box.y,
+			 objs, NULL);
 	} else {
+		ret = virtio_gpu_array_lock_resv(objs);
+		if (ret != 0)
+			goto err_put_free;
+
+		ret = -ENOMEM;
 		fence = virtio_gpu_fence_alloc(vgdev);
-		if (!fence) {
-			ret = -ENOMEM;
-			goto out_unres;
-		}
+		if (!fence)
+			goto err_unlock;
+
 		virtio_gpu_cmd_transfer_to_host_3d
-			(vgdev, qobj,
+			(vgdev,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &box, fence);
-		reservation_object_add_excl_fence(qobj->tbo.resv,
-						  &fence->f);
+			 args->level, &box, objs, fence);
 		dma_fence_put(&fence->f);
 	}
+	return 0;
 
-out_unres:
-	virtio_gpu_object_unreserve(qobj);
-out:
-	drm_gem_object_put_unlocked(gobj);
+err_unlock:
+	virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+	virtio_gpu_array_put_free(objs);
 	return ret;
 }
 
 static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file)
+				  struct drm_file *file)
 {
 	struct drm_virtgpu_3d_wait *args = data;
-	struct drm_gem_object *gobj = NULL;
-	struct virtio_gpu_object *qobj = NULL;
+	struct drm_gem_object *obj;
+	long timeout = 15 * HZ;
 	int ret;
-	bool nowait = false;
 
-	gobj = drm_gem_object_lookup(file, args->handle);
-	if (gobj == NULL)
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (obj == NULL)
 		return -ENOENT;
 
-	qobj = gem_to_virtio_gpu_obj(gobj);
-
-	if (args->flags & VIRTGPU_WAIT_NOWAIT)
-		nowait = true;
-	ret = virtio_gpu_object_wait(qobj, nowait);
+	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+	} else {
+		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+						timeout);
+	}
+	if (ret == 0)
+		ret = -EBUSY;
+	else if (ret > 0)
+		ret = 0;
 
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put_unlocked(obj);
 	return ret;
 }
 
@@ -581,7 +452,6 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&
 		    cache_ent->version == args->cap_set_ver) {
-			ptr = cache_ent->caps_cache;
 			spin_unlock(&vgdev->display_info_lock);
 			goto copy_exit;
 		}
@@ -592,15 +462,18 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
 				  &cache_ent);
 
+copy_exit:
 	ret = wait_event_timeout(vgdev->resp_wq,
 				 atomic_read(&cache_ent->is_valid), 5 * HZ);
 	if (!ret)
 		return -EBUSY;
 
+	/* is_valid check must proceed before copy of the cache entry. */
+	smp_rmb();
+
 	ptr = cache_ent->caps_cache;
 
-copy_exit:
-	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
 		return -EFAULT;
 
 	return 0;
@@ -608,34 +481,34 @@ copy_exit:
 
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
 			  virtio_gpu_resource_create_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	/* make transfer async to the main ring? - no sure, can we
 	 * thread these in the underlying GL
 	 */
 	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
 			  virtio_gpu_transfer_from_host_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
 			  virtio_gpu_transfer_to_host_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
-			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+			  DRM_RENDER_ALLOW),
 };
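For reference, the buffer-object handling that the converted ioctls now share can be sketched as below. This is a hypothetical helper for illustration only, not code from the patch: it assumes only the helpers visible in the hunks above (virtio_gpu_array_from_handles, virtio_gpu_array_lock_resv, virtio_gpu_array_unlock_resv, virtio_gpu_array_put_free, virtio_gpu_fence_alloc, virtio_gpu_cmd_submit) and the declarations in virtgpu_drv.h; the function name and the ownership comments are assumptions.

#include "virtgpu_drv.h"

/*
 * Hypothetical helper, mirroring the pattern used by the converted
 * ioctls after this change.  Not part of the patch.
 */
static int virtio_gpu_submit_sketch(struct virtio_gpu_device *vgdev,
				    struct drm_file *file, uint32_t ctx_id,
				    void *buf, uint32_t size,
				    uint32_t *handles, uint32_t nr_handles)
{
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;

	/* Look up all handles and take one reference per BO. */
	objs = virtio_gpu_array_from_handles(file, handles, nr_handles);
	if (!objs)
		return -ENOENT;

	/* Lock the reservation objects (replaces ttm_eu_reserve_buffers()). */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * Queue the command.  The submission path is expected to attach the
	 * fence, unlock the array and drop the references on completion
	 * (assumption, based on the ioctls above no longer doing this
	 * themselves).
	 */
	virtio_gpu_cmd_submit(vgdev, buf, size, ctx_id, objs, fence);
	dma_fence_put(&fence->f);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}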
