Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--   drivers/gpu/drm/virtio/Kconfig           |   2
-rw-r--r--   drivers/gpu/drm/virtio/Makefile          |   2
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.c     |  22
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.h     | 135
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_fence.c   |   4
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_gem.c     | 183
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ioctl.c   | 228
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_kms.c     |  24
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_object.c  | 270
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_plane.c   |  61
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_prime.c   |  34
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ttm.c     | 305
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_vq.c      | 227
13 files changed, 574 insertions, 923 deletions
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index ba36e933bb49..eff3047052d4 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO && MMU select DRM_KMS_HELPER - select DRM_TTM + select DRM_GEM_SHMEM_HELPER help This is the virtual GPU driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile index 458e606a936f..92aa2b3d349d 100644 --- a/drivers/gpu/drm/virtio/Makefile +++ b/drivers/gpu/drm/virtio/Makefile @@ -4,7 +4,7 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ - virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ + virtgpu_display.o virtgpu_vq.o \ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 0fc32fa0b3c0..8dee698c90ff 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd dev->pdev = pdev; if (vga) drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, - 0, "virtiodrmfb"); /* @@ -185,17 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = virtio_gpu_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .release = drm_release, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, @@ -210,15 +199,10 @@ static struct drm_driver driver = { #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, + .gem_prime_mmap = drm_gem_prime_mmap, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, - .gem_prime_vmap = virtgpu_gem_prime_vmap, - .gem_prime_vunmap = virtgpu_gem_prime_vunmap, - .gem_prime_mmap = virtgpu_gem_prime_mmap, - .gem_free_object_unlocked = virtio_gpu_gem_free_object, - .gem_open_object = virtio_gpu_gem_object_open, - .gem_close_object = virtio_gpu_gem_object_close, + .gem_create_object = virtio_gpu_create_object, .fops = &virtio_gpu_driver_fops, .ioctls = virtio_gpu_ioctls, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e28829661724..0b56ba005e25 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -35,12 +35,9 @@ #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_placement.h> #define DRIVER_NAME "virtio_gpu" #define DRIVER_DESC "virtio GPU" @@ -68,21 +65,23 @@ struct virtio_gpu_object_params { }; struct virtio_gpu_object { - struct drm_gem_object gem_base; + struct drm_gem_shmem_object base; 
uint32_t hw_res_handle; struct sg_table *pages; uint32_t mapped; - void *vmap; bool dumb; - struct ttm_place placement_code; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; bool created; }; #define gem_to_virtio_gpu_obj(gobj) \ - container_of((gobj), struct virtio_gpu_object, gem_base) + container_of((gobj), struct virtio_gpu_object, base.base) + +struct virtio_gpu_object_array { + struct ww_acquire_ctx ticket; + struct list_head next; + u32 nents, total; + struct drm_gem_object *objs[]; +}; struct virtio_gpu_vbuffer; struct virtio_gpu_device; @@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer { char *resp_buf; int resp_size; - virtio_gpu_resp_cb resp_cb; + struct virtio_gpu_object_array *objs; struct list_head list; }; @@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer { #define to_virtio_gpu_framebuffer(x) \ container_of(x, struct virtio_gpu_framebuffer, base) -struct virtio_gpu_mman { - struct ttm_bo_device bdev; -}; - struct virtio_gpu_queue { struct virtqueue *vq; spinlock_t qlock; @@ -179,8 +174,6 @@ struct virtio_gpu_device { struct virtio_device *vdev; - struct virtio_gpu_mman mman; - struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS]; uint32_t num_scanouts; @@ -205,6 +198,10 @@ struct virtio_gpu_device { struct work_struct config_changed_work; + struct work_struct obj_free_work; + spinlock_t obj_free_lock; + struct list_head obj_free_list; + struct virtio_gpu_drv_capset *capsets; uint32_t num_capsets; struct list_head cap_cache; @@ -217,9 +214,6 @@ struct virtio_gpu_fpriv { /* virtio_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 10 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; -int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, - struct list_head *head); -void virtio_gpu_unref_list(struct list_head *head); /* virtio_kms.c */ int virtio_gpu_init(struct drm_device *dev); @@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); void virtio_gpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); -struct virtio_gpu_object* -virtio_gpu_alloc_object(struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct virtio_gpu_fence *fence); int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents); +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents); +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj); +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence); +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_work(struct work_struct *work); + /* virtio vg */ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params 
*params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, uint32_t resource_id); void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, @@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, uint32_t id); void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void *data, uint32_t data_size, - uint32_t ctx_id, struct virtio_gpu_fence *fence); + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_ctrl_ack(struct virtqueue *vq); void virtio_gpu_cursor_ack(struct virtqueue *vq); @@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, enum drm_plane_type type, int index); -/* virtio_gpu_ttm.c */ -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev); -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev); -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma); - /* virtio_gpu_fence.c */ bool virtio_fence_signaled(struct dma_fence *f); struct virtio_gpu_fence *virtio_gpu_fence_alloc( @@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, u64 last_seq); /* virtio_gpu_object */ +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size); int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_params *params, struct virtio_gpu_object **bo_ptr, struct virtio_gpu_fence *fence); -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo); -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void 
*virtgpu_gem_prime_vmap(struct drm_gem_object *obj); -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); - -static inline struct virtio_gpu_object* -virtio_gpu_object_ref(struct virtio_gpu_object *bo) -{ - ttm_bo_get(&bo->tbo); - return bo; -} - -static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo) -{ - struct ttm_buffer_object *tbo; - - if ((*bo) == NULL) - return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); - *bo = NULL; -} static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo) { - return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); -} - -static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo, - bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) { - struct virtio_gpu_device *qdev = - bo->gem_base.dev->dev_private; - dev_err(qdev->dev, "%p reserve failed\n", bo); - } - return r; - } - return 0; -} - -static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo) -{ - ttm_bo_unreserve(&bo->tbo); + return drm_vma_node_offset_addr(&bo->base.base.vma_node); } /* virgl debufs */ diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index a0514f5bd006..a4b9881ca1d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -41,6 +41,10 @@ bool virtio_fence_signaled(struct dma_fence *f) { struct virtio_gpu_fence *fence = to_virtio_fence(f); + if (WARN_ON_ONCE(fence->f.seqno == 0)) + /* leaked fence outside driver before completing + * initialization with virtio_gpu_fence_emit */ + return false; if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) return true; return false; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 292566146814..4c1f579edfb3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -28,54 +28,31 @@ #include "virtgpu_drv.h" -void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj); - - if (obj) - virtio_gpu_object_unref(&obj); -} - -struct virtio_gpu_object* -virtio_gpu_alloc_object(struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct virtio_gpu_fence *fence) -{ - struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_object *obj; - int ret; - - ret = virtio_gpu_object_create(vgdev, params, &obj, fence); - if (ret) - return ERR_PTR(ret); - - return obj; -} - int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev, struct virtio_gpu_object_params *params, struct drm_gem_object **obj_p, uint32_t *handle_p) { + struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_object *obj; int ret; u32 handle; - obj = virtio_gpu_alloc_object(dev, params, NULL); - if (IS_ERR(obj)) - return PTR_ERR(obj); + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); + if (ret < 0) + return ret; - ret = drm_gem_handle_create(file, &obj->gem_base, &handle); + ret = drm_gem_handle_create(file, &obj->base.base, &handle); if (ret) { - drm_gem_object_release(&obj->gem_base); + drm_gem_object_release(&obj->base.base); return ret; } - *obj_p = &obj->gem_base; + *obj_p = &obj->base.base; /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&obj->gem_base); + 
drm_gem_object_put_unlocked(&obj->base.base); *handle_p = handle; return 0; @@ -136,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return 0; - r = virtio_gpu_object_reserve(qobj, false); - if (r) - return r; + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); return 0; } @@ -157,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return; - r = virtio_gpu_object_reserve(qobj, false); - if (r) + objs = virtio_gpu_array_alloc(1); + if (!objs) return; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); +} + +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents) +{ + struct virtio_gpu_object_array *objs; + size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents; + + objs = kmalloc(size, GFP_KERNEL); + if (!objs) + return NULL; + + objs->nents = 0; + objs->total = nents; + return objs; +} + +static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs) +{ + kfree(objs); +} + +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents) +{ + struct virtio_gpu_object_array *objs; + u32 i; + + objs = virtio_gpu_array_alloc(nents); + if (!objs) + return NULL; + + for (i = 0; i < nents; i++) { + objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]); + if (!objs->objs[i]) { + objs->nents = i; + virtio_gpu_array_put_free(objs); + return NULL; + } + } + objs->nents = i; + return objs; +} + +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj) +{ + if (WARN_ON_ONCE(objs->nents == objs->total)) + return; + + drm_gem_object_get(obj); + objs->objs[objs->nents] = obj; + objs->nents++; +} + +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) +{ + int ret; + + if (objs->nents == 1) { + ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL); + } else { + ret = drm_gem_lock_reservations(objs->objs, objs->nents, + &objs->ticket); + } + return ret; +} + +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs) +{ + if (objs->nents == 1) { + dma_resv_unlock(objs->objs[0]->resv); + } else { + drm_gem_unlock_reservations(objs->objs, objs->nents, + &objs->ticket); + } +} + +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence) +{ + int i; + + for (i = 0; i < objs->nents; i++) + dma_resv_add_excl_fence(objs->objs[i]->resv, fence); +} + +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) +{ + u32 i; + + for (i = 0; i < objs->nents; i++) + drm_gem_object_put_unlocked(objs->objs[i]); + virtio_gpu_array_free(objs); +} + +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + 
spin_lock(&vgdev->obj_free_lock); + list_add_tail(&objs->next, &vgdev->obj_free_list); + spin_unlock(&vgdev->obj_free_lock); + schedule_work(&vgdev->obj_free_work); +} + +void virtio_gpu_array_put_free_work(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, obj_free_work); + struct virtio_gpu_object_array *objs; + + spin_lock(&vgdev->obj_free_lock); + while (!list_empty(&vgdev->obj_free_list)) { + objs = list_first_entry(&vgdev->obj_free_list, + struct virtio_gpu_object_array, next); + list_del(&objs->next); + spin_unlock(&vgdev->obj_free_lock); + virtio_gpu_array_put_free(objs); + spin_lock(&vgdev->obj_free_lock); + } + spin_unlock(&vgdev->obj_free_lock); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 0a88ef11b9d3..9af1ec62434f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -29,7 +29,6 @@ #include <linux/sync_file.h> #include <drm/drm_file.h> -#include <drm/ttm/ttm_execbuf_util.h> #include <drm/virtgpu_drm.h> #include "virtgpu_drv.h" @@ -56,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data, &virtio_gpu_map->offset); } -int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, - struct list_head *head) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - int ret; - - ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true); - if (ret != 0) - return ret; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - ret = ttm_bo_validate(bo, &qobj->placement, &ctx); - if (ret) { - ttm_eu_backoff_reservation(ticket, head); - return ret; - } - } - return 0; -} - -void virtio_gpu_unref_list(struct list_head *head) -{ - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - - drm_gem_object_put_unlocked(&qobj->gem_base); - } -} - /* * Usage of execbuffer: * Relocations need to take into account the full VIRTIO_GPUDrawable size. 
@@ -107,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_virtgpu_execbuffer *exbuf = data; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_gem_object *gobj; struct virtio_gpu_fence *out_fence; - struct virtio_gpu_object *qobj; int ret; uint32_t *bo_handles = NULL; void __user *user_bo_handles = NULL; - struct list_head validate_list; - struct ttm_validate_buffer *buflist = NULL; - int i; - struct ww_acquire_ctx ticket; + struct virtio_gpu_object_array *buflist = NULL; struct sync_file *sync_file; int in_fence_fd = exbuf->fence_fd; int out_fence_fd = -1; @@ -157,15 +112,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, return out_fence_fd; } - INIT_LIST_HEAD(&validate_list); if (exbuf->num_bo_handles) { - bo_handles = kvmalloc_array(exbuf->num_bo_handles, - sizeof(uint32_t), GFP_KERNEL); - buflist = kvmalloc_array(exbuf->num_bo_handles, - sizeof(struct ttm_validate_buffer), - GFP_KERNEL | __GFP_ZERO); - if (!bo_handles || !buflist) { + sizeof(uint32_t), GFP_KERNEL); + if (!bo_handles) { ret = -ENOMEM; goto out_unused_fd; } @@ -177,27 +127,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, goto out_unused_fd; } - for (i = 0; i < exbuf->num_bo_handles; i++) { - gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); - if (!gobj) { - ret = -ENOENT; - goto out_unused_fd; - } - - qobj = gem_to_virtio_gpu_obj(gobj); - buflist[i].bo = &qobj->tbo; - - list_add(&buflist[i].head, &validate_list); + buflist = virtio_gpu_array_from_handles(drm_file, bo_handles, + exbuf->num_bo_handles); + if (!buflist) { + ret = -ENOENT; + goto out_unused_fd; } kvfree(bo_handles); bo_handles = NULL; } - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret) - goto out_free; + if (buflist) { + ret = virtio_gpu_array_lock_resv(buflist); + if (ret) + goto out_unused_fd; + } - buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); + buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out_unresv; @@ -222,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, } virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, - vfpriv->ctx_id, out_fence); - - ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); - - /* fence the command bo */ - virtio_gpu_unref_list(&validate_list); - kvfree(buflist); + vfpriv->ctx_id, buflist, out_fence); return 0; out_memdup: - kfree(buf); + kvfree(buf); out_unresv: - ttm_eu_backoff_reservation(&ticket, &validate_list); -out_free: - virtio_gpu_unref_list(&validate_list); + if (buflist) + virtio_gpu_array_unlock_resv(buflist); out_unused_fd: kvfree(bo_handles); - kvfree(buflist); + if (buflist) + virtio_gpu_array_put_free(buflist); if (out_fence_fd >= 0) put_unused_fd(out_fence_fd); @@ -316,11 +256,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, fence = virtio_gpu_fence_alloc(vgdev); if (!fence) return -ENOMEM; - qobj = virtio_gpu_alloc_object(dev, &params, fence); + ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence); dma_fence_put(&fence->f); - if (IS_ERR(qobj)) - return PTR_ERR(qobj); - obj = &qobj->gem_base; + if (ret < 0) + return ret; + obj = &qobj->base.base; ret = drm_gem_handle_create(file_priv, obj, &handle); if (ret) { @@ -347,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, qobj =
gem_to_virtio_gpu_obj(gobj); - ri->size = qobj->gem_base.size; + ri->size = qobj->base.base.size; ri->res_handle = qobj->hw_res_handle; drm_gem_object_put_unlocked(gobj); return 0; @@ -360,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_from_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; int ret; u32 offset = args->offset; @@ -371,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, if (vgdev->has_virgl_3d == false) return -ENOSYS; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; convert_to_hw_box(&box, &args->box); fence = virtio_gpu_fence_alloc(vgdev); if (!fence) { ret = -ENOMEM; - goto out_unres; + goto err_unlock; } virtio_gpu_cmd_transfer_from_host_3d - (vgdev, qobj->hw_res_handle, - vfpriv->ctx_id, offset, args->level, - &box, fence); - dma_resv_add_excl_fence(qobj->tbo.base.resv, - &fence->f); - + (vgdev, vfpriv->ctx_id, offset, args->level, + &box, objs, fence); dma_fence_put(&fence->f); -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); + return 0; + +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } @@ -413,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_to_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; struct virtio_gpu_box box; int ret; u32 offset = args->offset; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; - convert_to_hw_box(&box, &args->box); if (!vgdev->has_virgl_3d) { virtio_gpu_cmd_transfer_to_host_2d - (vgdev, qobj, offset, - box.w, box.h, box.x, box.y, NULL); + (vgdev, offset, + box.w, box.h, box.x, box.y, + objs, NULL); } else { + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; + + ret = -ENOMEM; fence = virtio_gpu_fence_alloc(vgdev); - if (!fence) { - ret = -ENOMEM; - goto out_unres; - } + if (!fence) + goto err_unlock; + virtio_gpu_cmd_transfer_to_host_3d - (vgdev, qobj, + (vgdev, vfpriv ? 
vfpriv->ctx_id : 0, offset, - args->level, &box, fence); - dma_resv_add_excl_fence(qobj->tbo.base.resv, - &fence->f); + args->level, &box, objs, fence); dma_fence_put(&fence->f); } + return 0; -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) + struct drm_file *file) { struct drm_virtgpu_3d_wait *args = data; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct drm_gem_object *obj; + long timeout = 15 * HZ; int ret; - bool nowait = false; - gobj = drm_gem_object_lookup(file, args->handle); - if (gobj == NULL) + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - if (args->flags & VIRTGPU_WAIT_NOWAIT) - nowait = true; - ret = virtio_gpu_object_wait(qobj, nowait); + if (args->flags & VIRTGPU_WAIT_NOWAIT) { + ret = dma_resv_test_signaled_rcu(obj->resv, true); + } else { + ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, + timeout); + } + if (ret == 0) + ret = -EBUSY; + else if (ret > 0) + ret = 0; - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index c190702fab72..2f5773e43557 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -147,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev) INIT_WORK(&vgdev->config_changed_work, virtio_gpu_config_changed_work_func); + INIT_WORK(&vgdev->obj_free_work, + virtio_gpu_array_put_free_work); + INIT_LIST_HEAD(&vgdev->obj_free_list); + spin_lock_init(&vgdev->obj_free_lock); + #ifdef __LITTLE_ENDIAN if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; - DRM_INFO("virgl 3d acceleration %s\n", - vgdev->has_virgl_3d ? "enabled" : "not supported by host"); -#else - DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { vgdev->has_edid = true; - DRM_INFO("EDID support available.\n"); } + DRM_INFO("features: %cvirgl %cedid\n", + vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? 
'+' : '-'); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); @@ -173,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev) goto err_vbufs; } - ret = virtio_gpu_ttm_init(vgdev); - if (ret) { - DRM_ERROR("failed to init ttm %d\n", ret); - goto err_ttm; - } - /* get display info */ virtio_cread(vgdev->vdev, struct virtio_gpu_config, num_scanouts, &num_scanouts); @@ -210,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev) return 0; err_scanouts: - virtio_gpu_ttm_fini(vgdev); -err_ttm: virtio_gpu_free_vbufs(vgdev); err_vbufs: vgdev->vdev->config->del_vqs(vgdev->vdev); @@ -234,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev) { struct virtio_gpu_device *vgdev = dev->dev_private; + flush_work(&vgdev->obj_free_work); vgdev->vqs_ready = false; flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work); @@ -242,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev) vgdev->vdev->config->del_vqs(vgdev->vdev); virtio_gpu_modeset_fini(vgdev); - virtio_gpu_ttm_fini(vgdev); virtio_gpu_free_vbufs(vgdev); virtio_gpu_cleanup_cap_cache(vgdev); kfree(vgdev->capsets); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 09b526518f5a..017a9e0fc3bb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -23,73 +23,83 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/ttm/ttm_execbuf_util.h> +#include <linux/moduleparam.h> #include "virtgpu_drv.h" +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { -#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -#else - static int handle; - - /* - * FIXME: dirty hack to avoid re-using IDs, virglrenderer - * can't deal with that. Needs fixing in virglrenderer, also - * should figure a better way to handle that in the guest. - */ - handle++; -#endif - - *resid = handle + 1; + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. 
+ */ + static int handle; + handle++; + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } return 0; } static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { -#if 0 - ida_free(&vgdev->resource_ida, id - 1); -#endif + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } } -static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) +static void virtio_gpu_free_object(struct drm_gem_object *obj) { - struct virtio_gpu_object *bo; - struct virtio_gpu_device *vgdev; - - bo = container_of(tbo, struct virtio_gpu_object, tbo); - vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + if (bo->pages) + virtio_gpu_object_detach(vgdev, bo); if (bo->created) virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle); - if (bo->pages) - virtio_gpu_object_free_sg_table(bo); - if (bo->vmap) - virtio_gpu_object_kunmap(bo); - drm_gem_object_release(&bo->gem_base); virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); + + drm_gem_shmem_free_object(obj); } -static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo) +static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = { + .free = virtio_gpu_free_object, + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .mmap = &drm_gem_shmem_mmap, +}; + +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size) { - u32 c = 1; - - vgbo->placement.placement = &vgbo->placement_code; - vgbo->placement.busy_placement = &vgbo->placement_code; - vgbo->placement_code.fpfn = 0; - vgbo->placement_code.lpfn = 0; - vgbo->placement_code.flags = - TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | - TTM_PL_FLAG_NO_EVICT; - vgbo->placement.num_placement = c; - vgbo->placement.num_busy_placement = c; + struct virtio_gpu_object *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + bo->base.base.funcs = &virtio_gpu_gem_funcs; + return &bo->base.base; } int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, @@ -97,157 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object **bo_ptr, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object_array *objs = NULL; + struct drm_gem_shmem_object *shmem_obj; struct virtio_gpu_object *bo; - size_t acc_size; int ret; *bo_ptr = NULL; - acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size, - sizeof(struct virtio_gpu_object)); + params->size = roundup(params->size, PAGE_SIZE); + shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size); + if (IS_ERR(shmem_obj)) + return PTR_ERR(shmem_obj); + bo = gem_to_virtio_gpu_obj(&shmem_obj->base); - bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL); - if (bo == NULL) - return -ENOMEM; ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); - if (ret < 0) { - kfree(bo); - return ret; - } - params->size = roundup(params->size, PAGE_SIZE); - ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size); - if (ret != 0) { - virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); - return ret; - } + 
if (ret < 0) + goto err_free_gem; + bo->dumb = params->dumb; + if (fence) { + ret = -ENOMEM; + objs = virtio_gpu_array_alloc(1); + if (!objs) + goto err_put_id; + virtio_gpu_array_add_obj(objs, &bo->base.base); + + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_objs; + } + if (params->virgl) { - virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence); + virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, + objs, fence); } else { - virtio_gpu_cmd_create_resource(vgdev, bo, params, fence); + virtio_gpu_cmd_create_resource(vgdev, bo, params, + objs, fence); } - virtio_gpu_init_ttm_placement(bo); - ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size, - ttm_bo_type_device, &bo->placement, 0, - true, acc_size, NULL, NULL, - &virtio_gpu_ttm_bo_destroy); - /* ttm_bo_init failure will call the destroy */ - if (ret != 0) + ret = virtio_gpu_object_attach(vgdev, bo, NULL); + if (ret != 0) { + virtio_gpu_free_object(&shmem_obj->base); return ret; - - if (fence) { - struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; - struct list_head validate_list; - struct ttm_validate_buffer mainbuf; - struct ww_acquire_ctx ticket; - unsigned long irq_flags; - bool signaled; - - INIT_LIST_HEAD(&validate_list); - memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer)); - - /* use a gem reference since unref list undoes them */ - drm_gem_object_get(&bo->gem_base); - mainbuf.bo = &bo->tbo; - list_add(&mainbuf.head, &validate_list); - - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret == 0) { - spin_lock_irqsave(&drv->lock, irq_flags); - signaled = virtio_fence_signaled(&fence->f); - if (!signaled) - /* virtio create command still in flight */ - ttm_eu_fence_buffer_objects(&ticket, &validate_list, - &fence->f); - spin_unlock_irqrestore(&drv->lock, irq_flags); - if (signaled) - /* virtio create command finished */ - ttm_eu_backoff_reservation(&ticket, &validate_list); - } - virtio_gpu_unref_list(&validate_list); } *bo_ptr = bo; return 0; -} - -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo) -{ - bo->vmap = NULL; - ttm_bo_kunmap(&bo->kmap); -} - -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo) -{ - bool is_iomem; - int r; - - WARN_ON(bo->vmap); - - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); - if (r) - return r; - bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); - return 0; -} -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo) -{ - int ret; - struct page **pages = bo->tbo.ttm->pages; - int nr_pages = bo->tbo.num_pages; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - size_t max_segment; - - /* wtf swapping */ - if (bo->pages) - return 0; - - if (bo->tbo.ttm->state == tt_unpopulated) - bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); - bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!bo->pages) - goto out; - - max_segment = virtio_max_dma_size(qdev->vdev); - max_segment &= PAGE_MASK; - if (max_segment > SCATTERLIST_MAX_SEGMENT) - max_segment = SCATTERLIST_MAX_SEGMENT; - ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, - max_segment, GFP_KERNEL); - if (ret) - goto out; - return 0; -out: - kfree(bo->pages); - bo->pages = NULL; - return -ENOMEM; -} - -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo) -{ - sg_free_table(bo->pages); - kfree(bo->pages); - bo->pages = NULL; -} - -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool 
no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) - return r; - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; +err_put_objs: + virtio_gpu_array_put_free(objs); +err_put_id: + virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); +err_free_gem: + drm_gem_shmem_free_object(&shmem_obj->base); + return ret; } - diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index a492ac3f4a7e..390524143139 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = { static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - return 0; + bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; + struct drm_crtc_state *crtc_state; + int ret; + + if (!state->fb || !state->crtc) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + is_cursor, true); + return ret; } static void virtio_gpu_primary_plane_update(struct drm_plane *plane, @@ -109,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); handle = bo->hw_res_handle; if (bo->dumb) { + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->src_w >> 16), - cpu_to_le32(plane->state->src_h >> 16), - cpu_to_le32(plane->state->src_x >> 16), - cpu_to_le32(plane->state->src_y >> 16), NULL); + (vgdev, 0, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16, + objs, NULL); } } else { handle = 0; @@ -186,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo = NULL; uint32_t handle; - int ret = 0; if (plane->state->crtc) output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); @@ -205,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { /* new cursor -- update & wait */ + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - 0, 0, vgfb->fence); - ret = virtio_gpu_object_reserve(bo, false); - if (!ret) { - dma_resv_add_excl_fence(bo->tbo.base.resv, - &vgfb->fence->f); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; - virtio_gpu_object_unreserve(bo); - virtio_gpu_object_wait(bo, false); - } + (vgdev, 0, + plane->state->crtc_w, + plane->state->crtc_h, + 0, 0, objs, vgfb->fence); + dma_fence_wait(&vgfb->fence->f, true); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; } if (plane->state->fb != old_state->fb) { diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index dc642a884b88..050d24c39a8f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -30,43 +30,9 @@ * 
device that might share buffers with virtgpu */ -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - - if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) - /* should not happen */ - return ERR_PTR(-EINVAL); - - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, - bo->tbo.ttm->num_pages); -} - struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *table) { return ERR_PTR(-ENODEV); } - -void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - int ret; - - ret = virtio_gpu_object_kmap(bo); - if (ret) - return NULL; - return bo->vmap; -} - -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj)); -} - -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - return drm_gem_prime_mmap(obj, vma); -} diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c deleted file mode 100644 index f87903641847..000000000000 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (C) 2015 Red Hat, Inc. - * All Rights Reserved. - * - * Authors: - * Dave Airlie - * Alon Levy - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <linux/delay.h> - -#include <drm/drm.h> -#include <drm/drm_file.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/virtgpu_drm.h> - -#include "virtgpu_drv.h" - -static struct -virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev) -{ - struct virtio_gpu_mman *mman; - struct virtio_gpu_device *vgdev; - - mman = container_of(bdev, struct virtio_gpu_mman, bdev); - vgdev = container_of(mman, struct virtio_gpu_device, mman); - return vgdev; -} - -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv; - struct virtio_gpu_device *vgdev; - int r; - - file_priv = filp->private_data; - vgdev = file_priv->minor->dev->dev_private; - if (vgdev == NULL) { - DRM_ERROR( - "filp->private_data->minor->dev->dev_private == NULL\n"); - return -EINVAL; - } - r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev); - - return r; -} - -static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev, - uint32_t flags) -{ - return 0; -} - -static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)1; - return 0; -} - -static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)NULL; -} - -static int ttm_bo_man_init(struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - return 0; -} - -static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) -{ - return 0; -} - -static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, - struct drm_printer *printer) -{ -} - -static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = { - .init = ttm_bo_man_init, - .takedown = ttm_bo_man_takedown, - .get_node = ttm_bo_man_get_node, - .put_node = ttm_bo_man_put_node, - .debug = ttm_bo_man_debug -}; - -static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_TT: - man->func = &virtio_gpu_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); - return -EINVAL; - } - return 0; -} - -static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - static const struct ttm_place placements = { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM, - }; - - placement->placement = &placements; - placement->busy_placement = &placements; - placement->num_placement = 1; - placement->num_busy_placement = 1; -} - -static int virtio_gpu_verify_access(struct ttm_buffer_object *bo, - struct file *filp) -{ - return 0; -} - -static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - mem->bus.base = 0; - mem->bus.is_iomem = false; - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; - switch 
(mem->mem_type) { - case TTM_PL_SYSTEM: - case TTM_PL_TT: - /* system memory */ - return 0; - default: - return -EINVAL; - } - return 0; -} - -static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ -} - -/* - * TTM backend functions. - */ -struct virtio_gpu_ttm_tt { - struct ttm_dma_tt ttm; - struct virtio_gpu_object *obj; -}; - -static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - struct virtio_gpu_device *vgdev = - virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); - - virtio_gpu_object_attach(vgdev, gtt->obj, NULL); - return 0; -} - -static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - struct virtio_gpu_device *vgdev = - virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); - - virtio_gpu_object_detach(vgdev, gtt->obj); - return 0; -} - -static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - - ttm_dma_tt_fini(&gtt->ttm); - kfree(gtt); -} - -static struct ttm_backend_func virtio_gpu_tt_func = { - .bind = &virtio_gpu_ttm_tt_bind, - .unbind = &virtio_gpu_ttm_tt_unbind, - .destroy = &virtio_gpu_ttm_tt_destroy, -}; - -static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo, - uint32_t page_flags) -{ - struct virtio_gpu_device *vgdev; - struct virtio_gpu_ttm_tt *gtt; - - vgdev = virtio_gpu_get_vgdev(bo->bdev); - gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL); - if (gtt == NULL) - return NULL; - gtt->ttm.ttm.func = &virtio_gpu_tt_func; - gtt->obj = container_of(bo, struct virtio_gpu_object, tbo); - if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { - kfree(gtt); - return NULL; - } - return &gtt->ttm.ttm; -} - -static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo) -{ - struct virtio_gpu_object *bo; - - bo = container_of(tbo, struct virtio_gpu_object, tbo); - - if (bo->pages) - virtio_gpu_object_free_sg_table(bo); -} - -static struct ttm_bo_driver virtio_gpu_bo_driver = { - .ttm_tt_create = &virtio_gpu_ttm_tt_create, - .invalidate_caches = &virtio_gpu_invalidate_caches, - .init_mem_type = &virtio_gpu_init_mem_type, - .eviction_valuable = ttm_bo_eviction_valuable, - .evict_flags = &virtio_gpu_evict_flags, - .verify_access = &virtio_gpu_verify_access, - .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, - .io_mem_free = &virtio_gpu_ttm_io_mem_free, - .swap_notify = &virtio_gpu_bo_swap_notify, -}; - -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) -{ - int r; - - /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&vgdev->mman.bdev, - &virtio_gpu_bo_driver, - vgdev->ddev->anon_inode->i_mapping, - false); - if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); - goto err_dev_init; - } - - r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0); - if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); - goto err_mm_init; - } - return 0; - -err_mm_init: - ttm_bo_device_release(&vgdev->mman.bdev); -err_dev_init: - return r; -} - -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev) -{ - ttm_bo_device_release(&vgdev->mman.bdev); - DRM_INFO("virtio_gpu: ttm finalized\n"); -} diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 7ac20490e1b4..74ad3bc3ebe8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++
b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } while (!virtqueue_enable_cb(vgdev->ctrlq.vq)); spin_unlock(&vgdev->ctrlq.qlock); - list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + list_for_each_entry(entry, &reclaim_list, list) { resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp); @@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } if (entry->resp_cb) entry->resp_cb(vgdev, entry); - - list_del(&entry->list); - free_vbuf(vgdev, entry); } wake_up(&vgdev->ctrlq.ack_queue); if (fence_id) virtio_gpu_fence_event_process(vgdev, fence_id); + + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + if (entry->objs) + virtio_gpu_array_put_free_delayed(vgdev, entry->objs); + list_del(&entry->list); + free_vbuf(vgdev, entry); + } } void virtio_gpu_dequeue_cursor_func(struct work_struct *work) @@ -252,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } -static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +/* Create sg_table from a vmalloc'd buffer. */ +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) +{ + int ret, s, i; + struct sg_table *sgt; + struct scatterlist *sg; + struct page *pg; + + if (WARN_ON(!PAGE_ALIGNED(data))) + return NULL; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL); + if (ret) { + kfree(sgt); + return NULL; + } + + for_each_sg(sgt->sgl, sg, *sg_ents, i) { + pg = vmalloc_to_page(data); + if (!pg) { + sg_free_table(sgt); + kfree(sgt); + return NULL; + } + + s = min_t(int, PAGE_SIZE, size); + sg_set_page(sg, pg, s, 0); + + size -= s; + data += s; + } + + return sgt; +} + +static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct scatterlist *vout) __releases(&vgdev->ctrlq.qlock) __acquires(&vgdev->ctrlq.qlock) { struct virtqueue *vq = vgdev->ctrlq.vq; - struct scatterlist *sgs[3], vcmd, vout, vresp; + struct scatterlist *sgs[3], vcmd, vresp; int outcnt = 0, incnt = 0; + bool notify = false; int ret; if (!vgdev->vqs_ready) - return -ENODEV; + return notify; sg_init_one(&vcmd, vbuf->buf, vbuf->size); sgs[outcnt + incnt] = &vcmd; outcnt++; - if (vbuf->data_size) { - sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); - sgs[outcnt + incnt] = &vout; + if (vout) { + sgs[outcnt + incnt] = vout; outcnt++; } @@ -292,32 +337,35 @@ retry: trace_virtio_gpu_cmd_queue(vq, (struct virtio_gpu_ctrl_hdr *)vbuf->buf); - virtqueue_kick(vq); + notify = virtqueue_kick_prepare(vq); } - - if (!ret) - ret = vq->num_free; - return ret; -} - -static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) -{ - int rc; - - spin_lock(&vgdev->ctrlq.qlock); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); - spin_unlock(&vgdev->ctrlq.qlock); - return rc; + return notify; } -static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_ctrl_hdr *hdr, 
- struct virtio_gpu_fence *fence) +static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_ctrl_hdr *hdr, + struct virtio_gpu_fence *fence) { struct virtqueue *vq = vgdev->ctrlq.vq; - int rc; + struct scatterlist *vout = NULL, sg; + struct sg_table *sgt = NULL; + bool notify; + int outcnt = 0; + + if (vbuf->data_size) { + if (is_vmalloc_addr(vbuf->data_buf)) { + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, + &outcnt); + if (!sgt) + return; + vout = sgt->sgl; + } else { + sg_init_one(&sg, vbuf->data_buf, vbuf->data_size); + vout = &sg; + outcnt = 1; + } + } again: spin_lock(&vgdev->ctrlq.qlock); @@ -330,29 +378,47 @@ again: * to wait for free space, which can result in fence ids being * submitted out-of-order. */ - if (vq->num_free < 3) { + if (vq->num_free < 2 + outcnt) { spin_unlock(&vgdev->ctrlq.qlock); wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3); goto again; } - if (fence) + if (hdr && fence) { virtio_gpu_fence_emit(vgdev, hdr, fence); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + if (vbuf->objs) { + virtio_gpu_array_add_fence(vbuf->objs, &fence->f); + virtio_gpu_array_unlock_resv(vbuf->objs); + } + } + notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout); spin_unlock(&vgdev->ctrlq.qlock); - return rc; + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); + + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } +} + +static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL); } -static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { struct virtqueue *vq = vgdev->cursorq.vq; struct scatterlist *sgs[1], ccmd; + bool notify; int ret; int outcnt; if (!vgdev->vqs_ready) - return -ENODEV; + return; sg_init_one(&ccmd, vbuf->buf, vbuf->size); sgs[0] = &ccmd; @@ -370,14 +436,13 @@ retry: trace_virtio_gpu_cmd_queue(vq, (struct virtio_gpu_ctrl_hdr *)vbuf->buf); - virtqueue_kick(vq); + notify = virtqueue_kick_prepare(vq); } spin_unlock(&vgdev->cursorq.qlock); - if (!ret) - ret = vq->num_free; - return ret; + if (notify) + virtqueue_notify(vq); } /* just create gem objects for userspace and long lived objects, @@ -388,6 +453,7 @@ retry: void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_2d *cmd_p; @@ -395,6 +461,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); @@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, } void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; 
@@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint64_t offset,
-					__le32 width, __le32 height,
-					__le32 x, __le32 y,
+					uint32_t width, uint32_t height,
+					uint32_t x, uint32_t y,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
@@ -498,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->offset = cpu_to_le64(offset);
-	cmd_p->r.width = width;
-	cmd_p->r.height = height;
-	cmd_p->r.x = x;
-	cmd_p->r.y = y;
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
@@ -826,34 +895,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_ctx_resource *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_ctx_resource *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
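
Each command emitter above now parks the object array in vbuf->objs, so the dequeue worker can drop the references via virtio_gpu_array_put_free_delayed() only once the host has answered. A hedged sketch of a caller, assuming the array helpers from virtgpu_gem.c elsewhere in this series (the virtio_gpu_array_alloc() and virtio_gpu_array_add_obj() signatures here are assumptions, and the function itself is hypothetical):

    #include "virtgpu_drv.h"

    /* Hypothetical caller, e.g. a full-surface flush of one BO. */
    static void example_flush_bo(struct virtio_gpu_device *vgdev,
                                 struct drm_gem_object *gobj,
                                 uint32_t w, uint32_t h)
    {
            struct virtio_gpu_object_array *objs;

            objs = virtio_gpu_array_alloc(1);       /* assumed helper */
            if (!objs)
                    return;
            virtio_gpu_array_add_obj(objs, gobj);   /* takes a reference */

            /* Ownership of "objs" moves to the vbuf; the reference is
             * only dropped from virtio_gpu_dequeue_ctrl_func() after
             * the host replies, keeping the BO alive meanwhile. */
            virtio_gpu_cmd_transfer_to_host_2d(vgdev, 0, w, h, 0, 0,
                                               objs, NULL);
    }
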
@@ -861,6 +934,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
 				  struct virtio_gpu_object_params *params,
+				  struct virtio_gpu_object_array *objs,
 				  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
@@ -868,6 +942,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -888,12 +963,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
@@ -906,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -917,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
-					  uint32_t resource_id, uint32_t ctx_id,
+					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->box = *box;
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
@@ -940,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 	vbuf->data_buf = data;
 	vbuf->data_size = data_size;
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -965,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
-	int si, nents;
+	int si, nents, ret;
 
 	if (WARN_ON_ONCE(!obj->created))
 		return -EINVAL;
+	if (WARN_ON_ONCE(obj->pages))
+		return -EINVAL;
 
-	if (!obj->pages) {
-		int ret;
+	ret = drm_gem_shmem_pin(&obj->base.base);
+	if (ret < 0)
+		return -EINVAL;
 
-		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
-		if (ret)
-			return ret;
+	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+	if (obj->pages == NULL) {
+		drm_gem_shmem_unpin(&obj->base.base);
+		return -EINVAL;
 	}
 
 	if (use_dma_api) {
@@ -1014,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
+	if (WARN_ON_ONCE(!obj->pages))
+		return;
+
 	if (use_dma_api && obj->mapped) {
 		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 		/* detach backing and wait for the host process it ... */
@@ -1029,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 	} else {
 		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
 	}
+
+	sg_free_table(obj->pages);
+	obj->pages = NULL;
+
+	drm_gem_shmem_unpin(&obj->base.base);
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
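
virtio_gpu_object_attach() and virtio_gpu_object_detach() above bracket the object's host-visible lifetime with drm_gem_shmem_pin()/drm_gem_shmem_unpin(), obtaining the sg_table in between. A minimal sketch of that lifecycle on its own, assuming the drm_gem_shmem helper signatures of this era (they take a struct drm_gem_object *); the function names are illustrative:

    #include <linux/scatterlist.h>
    #include <drm/drm_gem_shmem_helper.h>

    /* Pin the shmem pages so they stay resident while the host uses
     * them, then describe them with an sg_table. */
    static struct sg_table *example_pin_and_map(struct drm_gem_object *obj)
    {
            struct sg_table *pages;

            if (drm_gem_shmem_pin(obj))
                    return NULL;

            pages = drm_gem_shmem_get_sg_table(obj);
            if (!pages)
                    drm_gem_shmem_unpin(obj);
            return pages;
    }

    /* Reverse order on teardown: release the table, then unpin.  The
     * sg_table struct itself is heap-allocated by the helper, so the
     * caller owns both the entries and the struct. */
    static void example_unmap_and_unpin(struct drm_gem_object *obj,
                                        struct sg_table *pages)
    {
            sg_free_table(pages);
            kfree(pages);
            drm_gem_shmem_unpin(obj);
    }
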