author	Lucas Stach <l.stach@pengutronix.de>	2022-03-23 17:08:25 +0100
committer	Lucas Stach <l.stach@pengutronix.de>	2022-04-06 10:01:51 +0200
commit	2829a9fcb738f742baad9d15de4c6eac84bcfd08 (patch)
tree	1514e271a5f0700ad465f2b69027c11ef18cc319 /drivers/gpu/drm/etnaviv/etnaviv_mmu.c
parent	drm/etnaviv: move flush_seq increment into etnaviv_iommu_map/unmap (diff)
drm/etnaviv: reap idle softpin mappings when necessary
Right now the only point where softpin mappings get removed from the MMU context is when the mapped GEM object is destroyed. However, userspace might want to reuse that address space before the object is destroyed, which is a valid usage, as long as all mappings in that region of the address space are no longer used by any GPU jobs.

Implement reaping of idle MMU mappings that would otherwise prevent the insertion of a softpin mapping.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Tested-by: Guido Günther <agx@sigxcpu.org>
Acked-by: Guido Günther <agx@sigxcpu.org>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_mmu.c')
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d41295208102..dc1aa738c4f1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -223,8 +223,47 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
 static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
 		  struct drm_mm_node *node, size_t size, u64 va)
 {
+	struct etnaviv_vram_mapping *m, *n;
+	struct drm_mm_node *scan_node;
+	LIST_HEAD(scan_list);
+	int ret;
+
 	lockdep_assert_held(&context->lock);
 
+	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+					  va + size, DRM_MM_INSERT_LOWEST);
+	if (ret != -ENOSPC)
+		return ret;
+
+	/*
+	 * When we can't insert the node, due to an existing mapping blocking
+	 * the address space, there are two possible reasons:
+	 * 1. Userspace genuinely messed up and tried to reuse address space
+	 * before the last job using this VMA has finished executing.
+	 * 2. The existing buffer mappings are idle, but the buffers are not
+	 * destroyed yet (likely due to being referenced by another context), in
+	 * which case the mappings will not be cleaned up and we must reap them
+	 * here to make space for the new mapping.
+	 */
+
+	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
+		m = container_of(scan_node, struct etnaviv_vram_mapping,
+				 vram_node);
+
+		if (m->use)
+			return -ENOSPC;
+
+		list_add(&m->scan_node, &scan_list);
+	}
+
+	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
+		etnaviv_iommu_remove_mapping(context, m);
+		etnaviv_iommu_context_put(m->context);
+		m->context = NULL;
+		list_del_init(&m->mmu_node);
+		list_del_init(&m->scan_node);
+	}
+
 	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
 					   va + size, DRM_MM_INSERT_LOWEST);
 }
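
For readers less familiar with the drm_mm helpers, the same scan-then-reap idea can be shown as a small self-contained C sketch. Everything in it (struct mapping, struct address_space, overlaps(), insert_exact()) is a simplified stand-in made up for this illustration, not the driver's real code, which operates on drm_mm nodes and etnaviv_vram_mapping objects as in the diff above.

/*
 * Stand-alone model of the scan-then-reap approach from the patch above.
 * All types and helpers are illustrative stand-ins; the real driver
 * tracks mappings through the drm_mm allocator, not a hand-rolled list.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long start, size;	/* VA range covered by this mapping */
	int use;			/* >0 while a GPU job still references it */
	struct mapping *next;
};

struct address_space {
	struct mapping *head;
};

static bool overlaps(const struct mapping *m, unsigned long va,
		     unsigned long size)
{
	return m->start < va + size && va < m->start + m->size;
}

/*
 * Try to place a softpin mapping at a fixed VA.  Mirrors the two phases of
 * etnaviv_iommu_insert_exact(): refuse if any blocking mapping is still in
 * use, otherwise reap the idle blockers and insert the new mapping.
 */
static int insert_exact(struct address_space *as, unsigned long va,
			unsigned long size)
{
	struct mapping **pp, *m;

	/* Phase 1: a busy mapping in the range means userspace reused the
	 * address space too early -> fail without side effects. */
	for (m = as->head; m; m = m->next)
		if (overlaps(m, va, size) && m->use)
			return -ENOSPC;

	/* Phase 2: reap the idle mappings that block the range. */
	for (pp = &as->head; (m = *pp); ) {
		if (overlaps(m, va, size)) {
			*pp = m->next;
			free(m);
		} else {
			pp = &m->next;
		}
	}

	m = calloc(1, sizeof(*m));
	if (!m)
		return -ENOMEM;
	m->start = va;
	m->size = size;
	m->next = as->head;
	as->head = m;
	return 0;
}

int main(void)
{
	struct address_space as = { NULL };

	insert_exact(&as, 0x1000, 0x1000);	/* initial mapping, now idle */
	printf("reuse of idle range: %d\n",
	       insert_exact(&as, 0x1000, 0x2000));	/* 0: idle blocker reaped */

	as.head->use = 1;			/* pretend a job still uses it */
	printf("reuse of busy range: %d\n",
	       insert_exact(&as, 0x1000, 0x1000));	/* -ENOSPC */
	return 0;
}

The two-pass structure is the important design point: the insertion must fail without side effects when a blocking mapping is still referenced by a GPU job, so busy mappings are detected during the scan before anything is reaped, just as the patch returns -ENOSPC from the scan loop before processing the scan_list.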