author    CQ Tang <cq.tang@intel.com>  2022-06-10 15:12:04 +0300
committer Matthew Auld <matthew.auld@intel.com>  2022-06-22 11:17:13 +0100
commit    d976521a995a817007ae3f471ac22b93b1bd39f7
tree      e7670b9ab2a6aeeb04cb94270716e7ef0ab1d484
parent    drm/i915: don't leak lmem mapping in vma_evict
drm/i915: extend i915_vma_pin_iomap()
In the future, display might try to call this with a normal smem object,
which doesn't require PIN_MAPPABLE underneath in order to CPU map the
pages (unlike stolen). Extend i915_vma_pin_iomap() to directly use
i915_gem_object_pin_map() for such cases. This change was suggested by
Chris P Wilson: pin the smem with i915_gem_object_pin_map_unlocked().

v2 (jheikkil): Change i915_gem_object_pin_map_unlocked to
i915_gem_object_pin_map

Signed-off-by: CQ Tang <cq.tang@intel.com>
Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Cc: Chris Wilson <chris.p.wilson@intel.com>
Cc: Jari Tahvanainen <jari.tahvanainen@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
[mauld: tweak commit message, plus minor checkpatch fix]
Link: https://patchwork.freedesktop.org/patch/msgid/20220610121205.29645-2-juhapekka.heikkila@gmail.com
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  35
1 file changed, 23 insertions(+), 12 deletions(-)
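The subtle part of the diff below is how the unmap side later tells the mapping paths apart: i915_gem_object_pin_map() hands back a plain kernel address rather than an io_mapping, so the patch tags that pointer with page_pack_bits(ptr, 1) and the release paths check the tag with page_unmask_bits(), while page_mask_bits() strips it before the pointer is returned to callers. This works because a page-aligned address has its low PAGE_SHIFT bits free. Here is a minimal userspace sketch of that low-bit tagging idiom; pack_bits()/unmask_bits()/mask_bits() and the fixed PAGE_SHIFT of 12 are illustrative stand-ins, not the i915 helpers:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE (1UL << PAGE_SHIFT)

	/* Stash a small flag in the unused low bits of an aligned pointer. */
	static void *pack_bits(void *ptr, unsigned long bits)
	{
		assert(((uintptr_t)ptr & (PAGE_SIZE - 1)) == 0); /* aligned */
		return (void *)((uintptr_t)ptr | bits);
	}

	/* Read back the stashed flag. */
	static unsigned long unmask_bits(const void *ptr)
	{
		return (uintptr_t)ptr & (PAGE_SIZE - 1);
	}

	/* Strip the flag to recover the real address. */
	static void *mask_bits(void *ptr)
	{
		return (void *)((uintptr_t)ptr & ~(PAGE_SIZE - 1));
	}

	int main(void)
	{
		void *map = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		void *tagged = pack_bits(map, 1); /* 1 == "came from pin_map" */

		printf("flag=%lu addr_ok=%d\n", unmask_bits(tagged),
		       mask_bits(tagged) == map);
		free(map);
		return 0;
	}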
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d333c4416997..5d5828b9a242 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -551,13 +551,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
return IOMEM_ERR_PTR(-EINVAL);

- if (!i915_gem_object_is_lmem(vma->obj)) {
- if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
- err = -ENODEV;
- goto err;
- }
- }
-
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -570,20 +563,33 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
* of pages, that way we can also drop the
* I915_BO_ALLOC_CONTIGUOUS when allocating the object.
*/
- if (i915_gem_object_is_lmem(vma->obj))
+ if (i915_gem_object_is_lmem(vma->obj)) {
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
- else
+ } else if (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
+ } else {
+ ptr = (void __iomem *)
+ i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err;
+ }
+ ptr = page_pack_bits(ptr, 1);
+ }
+
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}

if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
- io_mapping_unmap(ptr);
+ if (page_unmask_bits(ptr))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
@@ -597,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
i915_vma_set_ggtt_write(vma);

/* NB Access through the GTT requires the device to be awake. */
- return ptr;
+ return page_mask_bits(ptr);

err_unpin:
__i915_vma_unpin(vma);
@@ -615,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
{
GEM_BUG_ON(vma->iomap == NULL);

+ /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
i915_vma_flush_writes(vma);

i915_vma_unpin_fence(vma);
@@ -1763,7 +1771,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
if (vma->iomap == NULL)
return;

- io_mapping_unmap(vma->iomap);
+ if (page_unmask_bits(vma->iomap))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(vma->iomap);

vma->iomap = NULL;
}
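One more pattern worth noting from the i915_vma_pin_iomap() hunk above: vma->iomap is filled in locklessly, so two threads can race to create the mapping, and cmpxchg() decides whose pointer gets cached; the loser tears down its own mapping (picking the release path from the packed bit) and adopts the winner's. A minimal sketch of that publish-once pattern, assuming C11 atomics; create_map()/destroy_map() are hypothetical stand-ins for the mapping and release paths, not i915 functions:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void *create_map(void) { return malloc(64); }	/* stand-in */
	static void destroy_map(void *ptr) { free(ptr); }	/* stand-in */

	struct cached_map {
		_Atomic(void *) iomap;	/* NULL until first successful map */
	};

	static void *get_map(struct cached_map *c)
	{
		void *ptr = atomic_load(&c->iomap);

		if (!ptr) {
			void *expected = NULL;

			ptr = create_map();
			if (!ptr)
				return NULL;

			/* Publish once; on failure 'expected' is the winner. */
			if (!atomic_compare_exchange_strong(&c->iomap,
							    &expected, ptr)) {
				destroy_map(ptr);	/* lost the race */
				ptr = expected;		/* reuse winner's map */
			}
		}
		return ptr;
	}

	int main(void)
	{
		struct cached_map c = { .iomap = NULL };
		void *a = get_map(&c);
		void *b = get_map(&c);	/* second call reuses the cache */

		printf("cached=%d\n", a == b);
		destroy_map(atomic_load(&c.iomap));
		return 0;
	}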