Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c  407
1 file changed, 334 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f..000fa4a1899f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -38,10 +38,15 @@
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
 #include <linux/pagevec.h>
-#include <drm/drmP.h>
-#include <drm/drm_vma_manager.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>
+
 #include "drm_internal.h"
 
 /** @file drm_gem.c
@@ -72,23 +77,6 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
-
 /**
  * drm_gem_init - Initialize the GEM device fields
  * @dev: drm_device structure to initialize
@@ -171,6 +159,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
 	kref_init(&obj->refcount);
 	obj->handle_count = 0;
 	obj->size = size;
+	dma_resv_init(&obj->_resv);
+	if (!obj->resv)
+		obj->resv = &obj->_resv;
+
 	drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -263,8 +255,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	else if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
-	if (drm_core_check_feature(dev, DRIVER_PRIME))
-		drm_gem_remove_prime_handles(obj, file_priv);
+	drm_gem_remove_prime_handles(obj, file_priv);
 	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
 	drm_gem_object_handle_put_unlocked(obj);
@@ -642,6 +633,9 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 	pagevec_init(&pvec);
 	for (i = 0; i < npages; i++) {
+		if (!pages[i])
+			continue;
+
 		if (dirty)
 			set_page_dirty(pages[i]);
 
@@ -659,6 +653,85 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+			  struct drm_gem_object **objs)
+{
+	int i, ret = 0;
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	for (i = 0; i < count; i++) {
+		/* Check if we currently have a reference on the object */
+		obj = idr_find(&filp->object_idr, handle[i]);
+		if (!obj) {
+			ret = -ENOENT;
+			break;
+		}
+		drm_gem_object_get(obj);
+		objs[i] = obj;
+	}
+	spin_unlock(&filp->table_lock);
+
+	return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs filled in with GEM object pointers. Returned GEM objects need to be
+ * released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+			   int count, struct drm_gem_object ***objs_out)
+{
+	int ret;
+	u32 *handles;
+	struct drm_gem_object **objs;
+
+	if (!count)
+		return 0;
+
+	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+			      GFP_KERNEL | __GFP_ZERO);
+	if (!objs)
+		return -ENOMEM;
+
+	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+	if (!handles) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+		ret = -EFAULT;
+		DRM_DEBUG("Failed to copy in GEM handles\n");
+		goto out;
+	}
+
+	ret = objects_lookup(filp, handles, count, objs);
+	*objs_out = objs;
+
+out:
+	kvfree(handles);
+	return ret;
+
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
+
 /**
  * drm_gem_object_lookup - look up a GEM object from its handle
  * @filp: DRM file private data
@@ -668,24 +741,56 @@ EXPORT_SYMBOL(drm_gem_put_pages);
  *
  * A reference to the object named by the handle if such exists on @filp, NULL
  * otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
  */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 {
+	struct drm_gem_object *obj = NULL;
+
+	objects_lookup(filp, &handle, 1, &obj);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_dma_resv_wait - Wait on a GEM object's reservation object's
+ * shared and/or exclusive fences.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * 0 on success, -EINVAL if the handle is invalid, -ETIME if the wait
+ * timed out, or -ERESTARTSYS if interrupted.
+ */
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
+			   bool wait_all, unsigned long timeout)
+{
+	long ret;
 	struct drm_gem_object *obj;
 
-	spin_lock(&filp->table_lock);
+	obj = drm_gem_object_lookup(filep, handle);
+	if (!obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+		return -EINVAL;
+	}
 
-	/* Check if we currently have a reference on the object */
-	obj = idr_find(&filp->object_idr, handle);
-	if (obj)
-		drm_gem_object_get(obj);
+	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
+					true, timeout);
+	if (ret == 0)
+		ret = -ETIME;
+	else if (ret > 0)
+		ret = 0;
 
-	spin_unlock(&filp->table_lock);
+	drm_gem_object_put_unlocked(obj);
 
-	return obj;
+	return ret;
 }
-EXPORT_SYMBOL(drm_gem_object_lookup);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
 
 /**
  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -851,6 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
 	if (obj->filp)
 		fput(obj->filp);
 
+	dma_resv_fini(&obj->_resv);
 	drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
@@ -993,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 		     struct vm_area_struct *vma)
 {
 	struct drm_device *dev = obj->dev;
+	int ret;
 
 	/* Check for valid size. */
 	if (obj_size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 
-	if (obj->funcs && obj->funcs->vm_ops)
-		vma->vm_ops = obj->funcs->vm_ops;
-	else if (dev->driver->gem_vm_ops)
-		vma->vm_ops = dev->driver->gem_vm_ops;
-	else
-		return -EINVAL;
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = obj;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.
 	 * This reference is cleaned up by the corresponding vm_close
@@ -1018,6 +1113,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	 */
 	drm_gem_object_get(obj);
 
+	if (obj->funcs && obj->funcs->mmap) {
+		/* Remove the fake offset */
+		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+		ret = obj->funcs->mmap(obj, vma);
+		if (ret) {
+			drm_gem_object_put_unlocked(obj);
+			return ret;
+		}
+		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+	} else {
+		if (obj->funcs && obj->funcs->vm_ops)
+			vma->vm_ops = obj->funcs->vm_ops;
+		else if (dev->driver->gem_vm_ops)
+			vma->vm_ops = dev->driver->gem_vm_ops;
+		else {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+
+		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+	}
+
+	vma->vm_private_data = obj;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
@@ -1113,15 +1235,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
 		obj->dev->driver->gem_print_info(p, indent, obj);
 }
 
-/**
- * drm_gem_pin - Pin backing buffer in memory
- * @obj: GEM object
- *
- * Make sure the backing buffer is pinned in memory.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
 int drm_gem_pin(struct drm_gem_object *obj)
 {
 	if (obj->funcs && obj->funcs->pin)
@@ -1131,14 +1244,7 @@ int drm_gem_pin(struct drm_gem_object *obj)
 	else
 		return 0;
 }
-EXPORT_SYMBOL(drm_gem_pin);
 
-/**
- * drm_gem_unpin - Unpin backing buffer from memory
- * @obj: GEM object
- *
- * Relax the requirement that the backing buffer is pinned in memory.
- */
 void drm_gem_unpin(struct drm_gem_object *obj)
 {
 	if (obj->funcs && obj->funcs->unpin)
@@ -1146,16 +1252,7 @@ void drm_gem_unpin(struct drm_gem_object *obj)
 	else if (obj->dev->driver->gem_prime_unpin)
 		obj->dev->driver->gem_prime_unpin(obj);
 }
-EXPORT_SYMBOL(drm_gem_unpin);
 
-/**
- * drm_gem_vmap - Map buffer into kernel virtual address space
- * @obj: GEM object
- *
- * Returns:
- * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
 void *drm_gem_vmap(struct drm_gem_object *obj)
 {
 	void *vaddr;
@@ -1172,13 +1269,7 @@ void *drm_gem_vmap(struct drm_gem_object *obj)
 
 	return vaddr;
 }
-EXPORT_SYMBOL(drm_gem_vmap);
 
-/**
- * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
- * @obj: GEM object
- * @vaddr: Virtual address (can be NULL)
- */
 void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
 	if (!vaddr)
@@ -1189,4 +1280,174 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
 	else if (obj->dev->driver->gem_prime_vunmap)
 		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }
-EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+			  struct ww_acquire_ctx *acquire_ctx)
+{
+	int contended = -1;
+	int i, ret;
+
+	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+	if (contended != -1) {
+		struct drm_gem_object *obj = objs[contended];
+
+		ret = dma_resv_lock_slow_interruptible(obj->resv,
+						       acquire_ctx);
+		if (ret) {
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
+		if (i == contended)
+			continue;
+
+		ret = dma_resv_lock_interruptible(objs[i]->resv,
+						  acquire_ctx);
+		if (ret) {
+			int j;
+
+			for (j = 0; j < i; j++)
+				dma_resv_unlock(objs[j]->resv);
+
+			if (contended != -1 && contended >= i)
+				dma_resv_unlock(objs[contended]->resv);
+
+			if (ret == -EDEADLK) {
+				contended = i;
+				goto retry;
+			}
+
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	ww_acquire_done(acquire_ctx);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+			    struct ww_acquire_ctx *acquire_ctx)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		dma_resv_unlock(objs[i]->resv);
+
+	ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+			    struct dma_fence *fence)
+{
+	struct dma_fence *entry;
+	unsigned long index;
+	u32 id = 0;
+	int ret;
+
+	if (!fence)
+		return 0;
+
+	/* Deduplicate if we already depend on a fence from the same context.
+	 * This lets the size of the array of deps scale with the number of
+	 * engines involved, rather than the number of BOs.
+	 */
+	xa_for_each(fence_array, index, entry) {
+		if (entry->context != fence->context)
+			continue;
+
+		if (dma_fence_is_later(fence, entry)) {
+			dma_fence_put(entry);
+			xa_store(fence_array, index, fence, GFP_KERNEL);
+		} else {
+			dma_fence_put(fence);
+		}
+		return 0;
+	}
+
+	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+	if (ret != 0)
+		dma_fence_put(fence);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+				     struct drm_gem_object *obj,
+				     bool write)
+{
+	int ret;
+	struct dma_fence **fences;
+	unsigned int i, fence_count;
+
+	if (!write) {
+		struct dma_fence *fence =
+			dma_resv_get_excl_rcu(obj->resv);
+
+		return drm_gem_fence_array_add(fence_array, fence);
+	}
+
+	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+				      &fence_count, &fences);
+	if (ret || !fence_count)
+		return ret;
+
+	for (i = 0; i < fence_count; i++) {
+		ret = drm_gem_fence_array_add(fence_array, fences[i]);
+		if (ret)
+			break;
+	}
+
+	for (; i < fence_count; i++)
+		dma_fence_put(fences[i]);
+	kfree(fences);
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
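
The drm_gem_objects_lookup() helper added above replaces the open-coded handle-array loops drivers have been carrying in their submit paths. Below is a minimal sketch of a driver-side caller; example_lookup_bos(), example_put_bos() and the bo_handles/bo_count parameters are hypothetical, only the calling convention comes from the patch. Two properties visible in the diff matter to callers: the helper takes a reference on every object it finds, and on its early error paths it never assigns *objs_out, so the caller should pre-initialize its pointer to NULL and make cleanup tolerate NULL array entries.

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <linux/mm.h>

/* Hypothetical caller; bo_handles/bo_count stand in for submit-ioctl args. */
static int example_lookup_bos(struct drm_file *file, u64 bo_handles,
			      u32 bo_count, struct drm_gem_object ***bos_out)
{
	*bos_out = NULL;	/* only assigned by the helper on the lookup path */
	return drm_gem_objects_lookup(file, u64_to_user_ptr(bo_handles),
				      bo_count, bos_out);
}

static void example_put_bos(struct drm_gem_object **bos, u32 bo_count)
{
	u32 i;

	if (!bos)
		return;
	/* Entries may be NULL if the lookup failed partway through. */
	for (i = 0; i < bo_count; i++) {
		if (bos[i])
			drm_gem_object_put_unlocked(bos[i]);
	}
	kvfree(bos);
}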
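
drm_gem_dma_resv_wait() packages the common wait-on-BO ioctl pattern: look up the handle, wait on the reservation object's fences, and translate the dma_resv return convention into an errno. A sketch of such an ioctl follows, assuming a hypothetical uAPI struct with a relative nanosecond timeout; none of these names are part of the patch.

#include <drm/drm_gem.h>
#include <linux/jiffies.h>

/* Hypothetical wait-BO uAPI struct. */
struct drm_example_wait_bo {
	__u32 handle;
	__u32 pad;
	__u64 timeout_ns;	/* relative timeout */
};

static int example_wait_bo_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_example_wait_bo *args = data;

	if (args->pad != 0)
		return -EINVAL;

	/* wait_all = true: wait on shared and exclusive fences alike.
	 * Returns 0 on success, -ETIME on timeout, -ERESTARTSYS if
	 * interrupted, per the helper above.
	 */
	return drm_gem_dma_resv_wait(file, args->handle, true,
				     nsecs_to_jiffies(args->timeout_ns));
}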
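
With the reworked drm_gem_mmap_obj(), an object can supply its own mmap handler through obj->funcs->mmap. Per the new core flow above, the handler runs with the fake offset already removed from vma->vm_pgoff and with a reference held on the object, and it must leave VM_DONTEXPAND set or the core's WARN_ON fires. A hypothetical handler sketch; the flag and caching choices here merely mirror the legacy path and are not mandated by the core, beyond VM_DONTEXPAND.

#include <drm/drm_gem.h>
#include <linux/mm.h>

static int example_gem_mmap(struct drm_gem_object *obj,
			    struct vm_area_struct *vma)
{
	/* vma->vm_pgoff no longer contains the fake offset here. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_ops = obj->funcs->vm_ops;

	return 0;	/* the core sets vma->vm_private_data after we return */
}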
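
Finally, drm_gem_lock_reservations(), drm_gem_fence_array_add_implicit() and drm_gem_unlock_reservations() are designed to be used together in a job-submission path: acquire every BO's reservation lock via the ww-mutex dance, harvest the implicit fences into an xarray of dependencies, install the job's own fence, then unlock. The sketch below shows that ordering; example_job is hypothetical, and the xarray must be initialized with XA_FLAGS_ALLOC because drm_gem_fence_array_add() allocates ids with xa_alloc().

#include <drm/drm_gem.h>
#include <linux/ww_mutex.h>
#include <linux/xarray.h>

struct example_job {
	struct xarray deps;		/* deduplicated dma_fence dependencies */
	struct drm_gem_object **bos;
	int bo_count;
};

static int example_gather_deps(struct example_job *job, bool write)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	/* Deadlock-free acquisition of every BO's reservation lock. */
	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(&job->deps,
						       job->bos[i], write);
		if (ret)
			break;
	}

	/*
	 * A real driver would install its own job fence in each
	 * reservation object here, before dropping the locks.
	 */
	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
	return ret;
}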
