Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 49
1 file changed, 31 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 3c2d4abd71c5..c7de454e8e88 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -491,7 +491,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
- BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
+ BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
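
For reference, mapping_gfp_constraint() is a thin helper from include/linux/pagemap.h that masks the mapping's gfp mask with the caller's constraint bits, so the BUG_ON() above keeps the semantics of the open-coded test it replaces:

    static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                                               gfp_t gfp_mask)
    {
            /* equivalent to mapping_gfp_mask(mapping) & __GFP_DMA32
             * for the BUG_ON() above */
            return mapping_gfp_mask(mapping) & gfp_mask;
    }
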
@@ -763,7 +763,8 @@ EXPORT_SYMBOL(drm_gem_object_release);
void
drm_gem_object_free(struct kref *kref)
{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_gem_object *obj =
+ container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
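
The old cast only worked because refcount happens to be the first member of struct drm_gem_object; container_of() recovers the enclosing object no matter where the member sits. A minimal, self-contained userspace sketch (simplified stand-in structs, not the real DRM types):

    #include <stddef.h>
    #include <stdio.h>

    struct kref { int count; };     /* stand-in for the kernel's struct kref */

    struct gem_object {
            int flags;              /* refcount is deliberately NOT first here */
            struct kref refcount;
    };

    /* Same idea as the kernel macro: subtract the member's offset. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void release(struct kref *kref)
    {
            /* (struct gem_object *)kref would point at 'flags' here and be
             * wrong; container_of() is correct for any member placement. */
            struct gem_object *obj =
                    container_of(kref, struct gem_object, refcount);

            printf("flags=%d\n", obj->flags);
    }

    int main(void)
    {
            struct gem_object obj = { .flags = 42, .refcount = { 1 } };

            release(&obj.refcount);
            return 0;
    }
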
@@ -810,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* drm_gem_mmap() prevents unprivileged users from mapping random objects. So
* callers must verify access restrictions before calling this helper.
*
- * NOTE: This function has to be protected with dev->struct_mutex
- *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
*/
@@ -820,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
{
struct drm_device *dev = obj->dev;
- lockdep_assert_held(&dev->struct_mutex);
-
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
@@ -865,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
int ret;
if (drm_device_is_unplugged(dev))
return -ENODEV;
- mutex_lock(&dev->struct_mutex);
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (likely(node)) {
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ /*
+ * When the object is being freed, after it hits 0-refcnt it
+ * proceeds to tear down the object. In the process it will
+ * attempt to remove the VMA offset and so acquire this
+ * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+ * that matches our range, we know it is in the process of being
+ * destroyed and will be freed as soon as we release the lock -
+ * so we have to check for the 0-refcnted object and treat it as
+ * invalid.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (!node) {
- mutex_unlock(&dev->struct_mutex);
+ if (!obj)
return -EINVAL;
- } else if (!drm_vma_node_is_allowed(node, filp)) {
- mutex_unlock(&dev->struct_mutex);
+
+ if (!drm_vma_node_is_allowed(node, filp)) {
+ drm_gem_object_unreference_unlocked(obj);
return -EACCES;
}
- obj = container_of(node, struct drm_gem_object, vma_node);
- ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+ vma);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
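
The new lookup follows a standard pattern for lockless object lifetimes: hold only the lookup lock, find the candidate, then try to take a reference that fails once the refcount has already dropped to zero. A userspace sketch of that pattern using C11 atomics (illustrative names, not the DRM/kref API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for kref_get_unless_zero(): take a reference only if the
     * object has not already started dying (count == 0). */
    static bool ref_get_unless_zero(atomic_int *count)
    {
            int old = atomic_load(count);

            while (old != 0) {
                    /* On failure the CAS reloads 'old' and we retry. */
                    if (atomic_compare_exchange_weak(count, &old, old + 1))
                            return true;
            }
            return false;   /* caller must treat the object as invalid */
    }

    int main(void)
    {
            atomic_int live = 1, dying = 0;

            printf("live:  %d\n", ref_get_unless_zero(&live));  /* 1 */
            printf("dying: %d\n", ref_get_unless_zero(&dying)); /* 0 */
            return 0;
    }

This is why drm_gem_mmap() resets obj back to NULL under the lookup lock: a zero refcount means the object is already being torn down and must not gain new users.
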