Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
 drivers/gpu/drm/i915/i915_vma.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 0bffb70b3c5f..5d5828b9a242 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -551,13 +551,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
 		return IOMEM_ERR_PTR(-EINVAL);
 
-	if (!i915_gem_object_is_lmem(vma->obj)) {
-		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
-			err = -ENODEV;
-			goto err;
-		}
-	}
-
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
 	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -570,20 +563,33 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
-		if (i915_gem_object_is_lmem(vma->obj))
+		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
-		else
+		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
+		} else {
+			ptr = (void __iomem *)
+				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+			if (IS_ERR(ptr)) {
+				err = PTR_ERR(ptr);
+				goto err;
+			}
+			ptr = page_pack_bits(ptr, 1);
+		}
+
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}
 
		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
-			io_mapping_unmap(ptr);
+			if (page_unmask_bits(ptr))
+				__i915_gem_object_release_map(vma->obj);
+			else
+				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}
@@ -597,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
	i915_vma_set_ggtt_write(vma);
 
	/* NB Access through the GTT requires the device to be awake. */
-	return ptr;
+	return page_mask_bits(ptr);
 
 err_unpin:
	__i915_vma_unpin(vma);
@@ -615,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
	GEM_BUG_ON(vma->iomap == NULL);
 
+	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
	i915_vma_flush_writes(vma);
 
	i915_vma_unpin_fence(vma);
@@ -1763,7 +1771,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
	if (vma->iomap == NULL)
		return;
 
-	io_mapping_unmap(vma->iomap);
+	if (page_unmask_bits(vma->iomap))
+		__i915_gem_object_release_map(vma->obj);
+	else
+		io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
 }
 
@@ -1907,9 +1918,11 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);
 
-		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
+
+	__i915_vma_iounmap(vma);
+
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));
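
The hunks above distinguish the two unmap paths by tagging vma->iomap: a pointer returned by i915_gem_object_pin_map() gets a flag packed into its low bits (page_pack_bits(ptr, 1)), which page_unmask_bits() later reads to pick __i915_gem_object_release_map() over io_mapping_unmap(), and page_mask_bits() strips before the pointer is handed to the caller. Below is a minimal, self-contained userspace sketch of that low-bit pointer tagging; the EXAMPLE_* names are illustrative stand-ins, not the i915 helpers.

/*
 * Sketch of stashing a flag in the unused low bits of a page-aligned
 * pointer, in the spirit of page_pack_bits()/page_unmask_bits()/
 * page_mask_bits(). Assumes PAGE_SHIFT == 12 for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12
#define EXAMPLE_PAGE_MASK  ((1ul << EXAMPLE_PAGE_SHIFT) - 1)

/* stash @bits in the low bits of a page-aligned pointer */
static inline void *example_pack_bits(void *ptr, unsigned long bits)
{
	return (void *)((uintptr_t)ptr | bits);
}

/* recover the stashed flag from a tagged pointer */
static inline unsigned long example_unmask_bits(const void *ptr)
{
	return (uintptr_t)ptr & EXAMPLE_PAGE_MASK;
}

/* strip the flag to get back the usable, page-aligned pointer */
static inline void *example_mask_bits(const void *ptr)
{
	return (void *)((uintptr_t)ptr & ~EXAMPLE_PAGE_MASK);
}

int main(void)
{
	/* pretend this is a page-aligned mapping from a pin_map()-style call */
	void *mapping = (void *)(255ul << EXAMPLE_PAGE_SHIFT);
	void *tagged = example_pack_bits(mapping, 1);

	/* flag 1 would select the release_map path, 0 the io_mapping_unmap path */
	printf("flag=%lu ptr=%p\n", example_unmask_bits(tagged),
	       example_mask_bits(tagged));
	return 0;
}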