Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_object.c')
 drivers/gpu/drm/virtio/virtgpu_object.c | 121 ++++++++++++++++++++------
 1 file changed, 105 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 3af7ec80c7da..d9039bb7c5e3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
}
}
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- if (bo->pages)
- virtio_gpu_object_detach(vgdev, bo);
- if (bo->created)
- virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ if (virtio_gpu_is_shmem(bo)) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+ if (shmem->pages) {
+ if (shmem->mapped) {
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl, shmem->mapped,
+ DMA_TO_DEVICE);
+ shmem->mapped = 0;
+ }
+
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
+ drm_gem_shmem_unpin(&bo->base.base);
+ }
+
+ drm_gem_shmem_free_object(&bo->base.base);
+ }
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- drm_gem_shmem_free_object(obj);
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ /* completion handler calls virtio_gpu_cleanup_object() */
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
}
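A note on the rework above: freeing is now deferred whenever the host still holds the resource. virtio_gpu_free_object() queues an unref command and returns; the response callback is expected to finish the teardown via virtio_gpu_cleanup_object(). A minimal sketch of such a callback, assuming the vbuffer carries the object pointer in resp_cb_data (hypothetical names modeled on the matching virtgpu_vq.c change, not part of this diff):

    static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
    {
            struct virtio_gpu_object *bo = vbuf->resp_cb_data;

            /* The host acked the unref; guest-side cleanup is now safe. */
            vbuf->resp_cb_data = NULL;
            virtio_gpu_cleanup_object(bo);
    }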
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
@@ -86,21 +112,75 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = &drm_gem_shmem_mmap,
+ .mmap = drm_gem_shmem_mmap,
};
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
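virtio_gpu_is_shmem() recovers the backing type from the funcs pointer rather than a separate type tag, so code that later grows a second object type (e.g. VRAM-backed) can branch on it cheaply. A hypothetical caller, for illustration only:

    static void virtio_gpu_debug_bo(struct virtio_gpu_object *bo)
    {
            if (virtio_gpu_is_shmem(bo))
                    DRM_DEBUG("bo %u: shmem-backed\n", bo->hw_res_handle);
            else
                    DRM_DEBUG("bo %u: non-shmem backing\n", bo->hw_res_handle);
    }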
+
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
- struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_shmem *shmem;
+ struct drm_gem_shmem_object *dshmem;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
return NULL;
- bo->base.base.funcs = &virtio_gpu_gem_funcs;
- bo->base.map_cached = true;
- return &bo->base.base;
+ dshmem = &shmem->base.base;
+ dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+ dshmem->map_cached = true;
+ return &dshmem->base;
+}
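The allocation above relies on struct subclassing plus a container_of() helper; a sketch of the layout this file assumes (the authoritative definitions live in virtgpu_drv.h):

    struct virtio_gpu_object_shmem {
            struct virtio_gpu_object base;
            struct sg_table *pages;   /* guest pages, set in shmem_init */
            uint32_t mapped;          /* dma-mapped entry count, 0 if unmapped */
    };

    #define to_virtio_gpu_shmem(virtio_gpu_object) \
            container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)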
+
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ struct scatterlist *sg;
+ int si, ret;
+
+ ret = drm_gem_shmem_pin(&bo->base.base);
+ if (ret < 0)
+ return -EINVAL;
+
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
+ drm_gem_shmem_unpin(&bo->base.base);
+ return -EINVAL;
+ }
+
+ if (use_dma_api) {
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
+ } else {
+ *nents = shmem->pages->nents;
+ }
+
+ *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+ (*ents)[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
+ return 0;
}
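The entries filled in above follow the virtio-gpu wire format used for attach-backing commands, with all fields little-endian per the virtio spec (cf. include/uapi/linux/virtio_gpu.h):

    struct virtio_gpu_mem_entry {
            __le64 addr;    /* bus address when dma-mapped, else physical */
            __le32 length;
            __le32 padding;
    };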
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -111,6 +191,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
int ret;
*bo_ptr = NULL;
@@ -147,12 +229,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
objs, fence);
}
- ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
+ return ret;
+ }
+
+ ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
+ virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
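For context, a caller goes through the new create path roughly as follows (a sketch only; error handling trimmed, and the surrounding params/fence setup is assumed from the driver's ioctl path):

    struct virtio_gpu_object *bo;
    int ret;

    ret = virtio_gpu_object_create(vgdev, &params, &bo, fence);
    if (ret)
            return ret;
    /* bo->hw_res_handle now names a host resource with guest pages
     * attached; virtio_gpu_free_object() tears both down. */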