author      Lucas Stach <l.stach@pengutronix.de>    2019-07-05 19:17:24 +0200
committer   Lucas Stach <l.stach@pengutronix.de>    2019-08-15 10:56:45 +0200
commit      27b67278e007b5475bc14918794bf73cf297a026 (patch)
tree        f7ae309210bb22508eead1eab6fe139fbd76f100  /drivers/gpu/drm/etnaviv/etnaviv_iommu.c
parent      drm/etnaviv: replace MMU flush marker with flush sequence (diff)
drm/etnaviv: rework MMU handling
This reworks the MMU handling to make it possible to have multiple MMU contexts.

A context is basically one instance of GPU page tables. Currently we have one set of page tables per GPU, which isn't all that clever, as it has the following two consequences:

1. All GPU clients (aka processes) share the same page tables, which means there is no isolation between clients, only between GPU-assigned memory spaces and the rest of the system. Better than nothing, but also not great.

2. Clients operating on the same set of buffers with different etnaviv GPU cores, e.g. a workload using both the 2D and 3D GPU, need to map the used buffers into the page table set of each used GPU.

This patch reworks all the MMU handling to introduce the abstraction of the MMU context. A context can be shared across different GPU cores, as long as they have compatible MMU implementations, which is the case for all systems with Vivante GPUs seen in the wild.

As MMUv1 is not able to change page tables on the fly without a "stop the world" operation (stop the GPU, change the page tables via the CPU, restart the GPU), the implementation introduces a shared context on MMUv1, which is returned whenever there is a request for a new context.

This patch assigns an MMU context to each GPU, so on MMUv2 systems there is still one set of page tables per GPU, but due to the shared context, MMUv1 systems see a change in behavior: a single page table set is now used across all GPU cores.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
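The MMUv1 behavior described above (every request for a new context hands back the one shared context with an extra reference) can be illustrated with a minimal standalone sketch. The struct and function names here are hypothetical stand-ins for illustration only; the actual kernel-side implementation, including the global lock and kref handling this model omits, is in the diff below.

	/*
	 * Minimal userspace model of the MMUv1 "single shared context" pattern.
	 * Names are illustrative; see etnaviv_iommuv1_context_alloc() in the diff.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct iommu_context {
		int refcount;                   /* models the kref on the real context */
	};

	struct iommu_global {
		struct iommu_context *shared_context;  /* models global->v1.shared_context */
	};

	/* Return the shared context if it already exists, otherwise create it. */
	static struct iommu_context *context_alloc(struct iommu_global *global)
	{
		if (global->shared_context) {
			global->shared_context->refcount++;  /* models etnaviv_iommu_context_get() */
			return global->shared_context;
		}

		struct iommu_context *ctx = calloc(1, sizeof(*ctx));
		if (!ctx)
			return NULL;

		ctx->refcount = 1;
		global->shared_context = ctx;
		return ctx;
	}

	int main(void)
	{
		struct iommu_global global = { 0 };

		/* Two "GPU cores" asking for a context get the same object back. */
		struct iommu_context *a = context_alloc(&global);
		struct iommu_context *b = context_alloc(&global);

		printf("same context: %s, refcount: %d\n",
		       a == b ? "yes" : "no", a->refcount);
		free(a);
		return 0;
	}

Running this prints "same context: yes, refcount: 2", which is exactly the point of the MMUv1 special case: all cores end up on one page table set, while MMUv2 cores get their own context per allocation.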
Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_iommu.c')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 151
1 file changed, 69 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 18c627c5cae1..a2f1ff151822 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -11,7 +11,6 @@
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
#include "state_hi.xml.h"
#define PT_SIZE SZ_2M
@@ -19,113 +18,78 @@
#define GPU_MEM_START 0x80000000
-struct etnaviv_iommuv1_domain {
- struct etnaviv_iommu_domain base;
+struct etnaviv_iommuv1_context {
+ struct etnaviv_iommu_context base;
u32 *pgtable_cpu;
dma_addr_t pgtable_dma;
};
-static struct etnaviv_iommuv1_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
{
- return container_of(domain, struct etnaviv_iommuv1_domain, base);
+ return container_of(context, struct etnaviv_iommuv1_context, base);
}
-static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
- u32 *p;
- int i;
-
- etnaviv_domain->base.bad_page_cpu =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->base.bad_page_cpu)
- return -ENOMEM;
-
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
-
- etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- PT_SIZE,
- &etnaviv_domain->pgtable_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->pgtable_cpu) {
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
- return -ENOMEM;
- }
-
- memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
- PT_ENTRIES);
-
- return 0;
-}
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
-static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ drm_mm_takedown(&context->mm);
- dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
- etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
+ dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+ v1_context->pgtable_dma);
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ context->global->v1.shared_context = NULL;
- kfree(etnaviv_domain);
+ kfree(v1_context);
}
-static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = paddr;
+ v1_context->pgtable_cpu[index] = paddr;
return 0;
}
-static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
+ v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
return SZ_4K;
}
-static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
return PT_SIZE;
}
-static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+ void *buf)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
- memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
+ memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
/* set base addresses */
@@ -136,7 +100,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* set page table address in MC */
- pgtable = (u32)etnaviv_domain->pgtable_dma;
+ pgtable = (u32)v1_context->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -145,39 +109,62 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
- .free = etnaviv_iommuv1_domain_free,
+
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
.dump_size = etnaviv_iommuv1_dump_size,
.dump = etnaviv_iommuv1_dump,
+ .restore = etnaviv_iommuv1_restore,
};
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain;
- struct etnaviv_iommu_domain *domain;
- int ret;
+ struct etnaviv_iommuv1_context *v1_context;
+ struct etnaviv_iommu_context *context;
+
+ mutex_lock(&global->lock);
+
+ /*
+ * MMUv1 does not support switching between different contexts without
+ * a stop the world operation, so we only support a single shared
+ * context with this version.
+ */
+ if (global->v1.shared_context) {
+ context = global->v1.shared_context;
+ etnaviv_iommu_context_get(context);
+ mutex_unlock(&global->lock);
+ return context;
+ }
- etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
- if (!etnaviv_domain)
+ v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+ if (!v1_context)
return NULL;
- domain = &etnaviv_domain->base;
+ v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+ &v1_context->pgtable_dma,
+ GFP_KERNEL);
+ if (!v1_context->pgtable_cpu)
+ goto out_free;
- domain->dev = gpu->dev;
- domain->base = GPU_MEM_START;
- domain->size = PT_ENTRIES * SZ_4K;
- domain->ops = &etnaviv_iommuv1_ops;
+ memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
- ret = __etnaviv_iommu_init(etnaviv_domain);
- if (ret)
- goto out_free;
+ context = &v1_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+ context->global->v1.shared_context = context;
+
+ mutex_unlock(&global->lock);
- return &etnaviv_domain->base;
+ return context;
out_free:
- kfree(etnaviv_domain);
+ mutex_unlock(&global->lock);
+ kfree(v1_context);
return NULL;
}