author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
commit     574cc4539762561d96b456dbc0544d8898bd4c6e (patch)
tree       07d84db8cf9fd30cbde6f539ce3a3f6116593e41 /drivers/gpu/drm/etnaviv/etnaviv_mmu.c
parent     Merge tag 'pinctrl-v5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl (diff)
parent     Merge branch 'linux-5.4' of git://github.com/skeggsb/linux into drm-next (diff)
Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main pull request for the 5.4-rc1 merge window. I don't
  think there is anything outstanding, so next week should just be
  fixes, but we'll see if I missed anything. I landed some fixes
  earlier in the week but got delayed writing the summary and sending
  it out, due to a mix of sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out
  of the way instead of delaying it longer. It's also pretty large in
  commit count and in new amd header file size. The largest thing is
  four new amdgpu products (navi12/14, arcturus and renoir APU
  support).

  Otherwise it's pretty much lots of work across the board: i915 has
  started landing tigerlake support, lots of icelake fixes and lots of
  locking rework for future gpu support, lots of header file rework
  (drmP.h is nearly gone), and some old legacy hacks (DRM_WAIT_ON)
  have been put into the places they are needed.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence
   - selftests

  ttm:
   - embed drm_gem_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x - audio graph card support
   - tc358767 - aux data handling rework
   - ti-sn65dsi86 - debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp
     LS037V7DW01, Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly
     TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general all over refactoring
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes,
     DSI suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT - command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notificaiton as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
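
The etnaviv items above (per-process address space and softpin support) are what the etnaviv_mmu.c diff below implements: mappings move from a single per-GPU struct etnaviv_iommu to refcounted struct etnaviv_iommu_context objects, and etnaviv_iommu_map_gem() gains a va argument for user-requested placement. As a rough illustration, here is a hedged sketch of the two placement paths; place_node() is a hypothetical wrapper, but the drm_mm calls mirror the ones visible in the diff:

#include <drm/drm_mm.h>

/*
 * Hedged sketch, not part of the patch: the two placement paths used
 * by etnaviv_iommu_map_gem() in the diff below.
 */
static int place_node(struct drm_mm *mm, struct drm_mm_node *node,
		      size_t size, u64 va)
{
	if (va)
		/* softpin: userspace picked the address, so ask for
		 * exactly the [va, va + size) window */
		return drm_mm_insert_node_in_range(mm, node, size, 0, 0,
						   va, va + size,
						   DRM_MM_INSERT_LOWEST);

	/* no fixed address: search the whole space bottom-up */
	return drm_mm_insert_node_in_range(mm, node, size, 0, 0,
					   0, U64_MAX, DRM_MM_INSERT_LOW);
}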
Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_mmu.c')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c  326
1 file changed, 221 insertions(+), 105 deletions(-)
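
One smaller change worth calling out before the diff: the boolean mmu->need_flush is replaced by an incrementing context->flush_seq. A sequence counter lets each consumer remember which MMU state it has already flushed for, instead of everyone sharing one flag. A minimal sketch of the idea, assuming a hypothetical consumer-side flushed_seq variable (not from this patch):

/*
 * Hedged sketch of the flush_seq idea. Writers do
 * "context->flush_seq++" under context->lock whenever mappings
 * change (see the diff below); consumers compare and update their
 * own last-seen value.
 */
static bool mmu_needs_flush(u32 *flushed_seq,
			    struct etnaviv_iommu_context *context)
{
	if (*flushed_seq == context->flush_seq)
		return false;	/* nothing changed since last flush */

	*flushed_seq = context->flush_seq;
	return true;		/* caller must emit an MMU flush */
}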
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 8069f9f36a2e..35ebae6a1be7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -3,15 +3,17 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
-#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
@@ -24,7 +26,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
while (unmapped < size) {
- unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ unmapped_page = context->global->ops->unmap(context, iova,
+ pgsize);
if (!unmapped_page)
break;
@@ -33,7 +36,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
}
-static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
@@ -49,7 +52,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
}
while (size) {
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = context->global->ops->map(context, iova, paddr, pgsize,
+ prot);
if (ret)
break;
@@ -60,21 +64,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
/* unroll mapping in case something went wrong */
if (ret)
- etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+ etnaviv_context_unmap(context, orig_iova, orig_size - size);
return ret;
}
-static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
-{
- struct etnaviv_iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
+{
+	struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
int ret;
- if (!domain || !sgt)
+ if (!context || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -83,7 +85,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
- ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
@@ -98,16 +100,15 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
da += bytes;
}
return ret;
}
-static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len)
{
- struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
@@ -115,7 +116,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
@@ -125,24 +126,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
}
}
-static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+ etnaviv_iommu_unmap(context, mapping->vram_node.start,
etnaviv_obj->sgt, etnaviv_obj->base.size);
drm_mm_remove_node(&mapping->vram_node);
}
-static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;
- lockdep_assert_held(&mmu->lock);
+ lockdep_assert_held(&context->lock);
while (1) {
struct etnaviv_vram_mapping *m, *n;
@@ -150,17 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
- ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+ ret = drm_mm_insert_node_in_range(&context->mm, node,
size, 0, 0, 0, U64_MAX, mode);
if (ret != -ENOSPC)
break;
/* Try to retire some entries */
- drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
+ drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
- list_for_each_entry(free, &mmu->mappings, mmu_node) {
+ list_for_each_entry(free, &context->mappings, mmu_node) {
/* If this vram node has not been used, skip this. */
if (!free->vram_node.mm)
continue;
@@ -202,8 +203,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* this mapping.
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
- etnaviv_iommu_remove_mapping(mmu, m);
- m->mmu = NULL;
+ etnaviv_iommu_remove_mapping(context, m);
+ m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
}
@@ -219,9 +220,16 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
return ret;
}
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size, u64 va)
+{
+ return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
- struct etnaviv_vram_mapping *mapping)
+ struct etnaviv_vram_mapping *mapping, u64 va)
{
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
@@ -229,17 +237,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
lockdep_assert_held(&etnaviv_obj->lock);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
- if (mmu->version == ETNAVIV_IOMMU_V1 &&
+ if (context->global->version == ETNAVIV_IOMMU_V1 &&
sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
ret = 0;
goto unlock;
}
@@ -247,12 +255,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
node = &mapping->vram_node;
- ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
+ if (va)
+ ret = etnaviv_iommu_insert_exact(context, node,
+ etnaviv_obj->base.size, va);
+ else
+ ret = etnaviv_iommu_find_iova(context, node,
+ etnaviv_obj->base.size);
if (ret < 0)
goto unlock;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+ ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
@@ -260,130 +273,233 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
goto unlock;
}
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mmu->need_flush = true;
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ context->flush_seq++;
unlock:
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
WARN_ON(mapping->use);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* If the vram node is on the mm, unmap and remove the node */
- if (mapping->vram_node.mm == &mmu->mm)
- etnaviv_iommu_remove_mapping(mmu, mapping);
+ if (mapping->vram_node.mm == &context->mm)
+ etnaviv_iommu_remove_mapping(context, mapping);
list_del(&mapping->mmu_node);
- mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
+ context->flush_seq++;
+ mutex_unlock(&context->lock);
}
-void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+static void etnaviv_iommu_context_free(struct kref *kref)
{
- drm_mm_takedown(&mmu->mm);
- mmu->domain->ops->free(mmu->domain);
- kfree(mmu);
+ struct etnaviv_iommu_context *context =
+ container_of(kref, struct etnaviv_iommu_context, refcount);
+
+ etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+
+ context->global->ops->free(context);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
+{
+ kref_put(&context->refcount, etnaviv_iommu_context_free);
}
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc)
{
- enum etnaviv_iommu_version version;
- struct etnaviv_iommu *mmu;
+ struct etnaviv_iommu_context *ctx;
+ int ret;
- mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
- if (!mmu)
- return ERR_PTR(-ENOMEM);
+ if (global->version == ETNAVIV_IOMMU_V1)
+ ctx = etnaviv_iommuv1_context_alloc(global);
+ else
+ ctx = etnaviv_iommuv2_context_alloc(global);
- if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
- mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V1;
- } else {
- mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V2;
- }
+ if (!ctx)
+ return NULL;
- if (!mmu->domain) {
- dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
- kfree(mmu);
- return ERR_PTR(-ENOMEM);
+ ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+ global->memory_base);
+ if (ret) {
+ global->ops->free(ctx);
+ return NULL;
}
- mmu->gpu = gpu;
- mmu->version = version;
- mutex_init(&mmu->lock);
- INIT_LIST_HEAD(&mmu->mappings);
-
- drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
-
- return mmu;
+ return ctx;
}
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
- etnaviv_iommuv1_restore(gpu);
- else
- etnaviv_iommuv2_restore(gpu);
+ context->global->ops->restore(gpu, context);
}
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ mutex_lock(&context->lock);
- if (mmu->version == ETNAVIV_IOMMU_V1) {
- *iova = paddr - gpu->memory_base;
+ if (mapping->use > 0) {
+ mapping->use++;
+ mutex_unlock(&context->lock);
return 0;
+ }
+
+ /*
+ * For MMUv1 we don't add the suballoc region to the pagetables, as
+ * those GPUs can only work with cmdbufs accessed through the linear
+ * window. Instead we manufacture a mapping to make it look uniform
+ * to the upper layers.
+ */
+ if (context->global->version == ETNAVIV_IOMMU_V1) {
+ mapping->iova = paddr - memory_base;
} else {
+ struct drm_mm_node *node = &mapping->vram_node;
int ret;
- mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
+ ret = etnaviv_iommu_find_iova(context, node, size);
if (ret < 0) {
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
- ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
- size, ETNAVIV_PROT_READ);
+
+ mapping->iova = node->start;
+ ret = etnaviv_context_map(context, node->start, paddr, size,
+ ETNAVIV_PROT_READ);
if (ret < 0) {
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
return ret;
}
- gpu->mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
- *iova = (u32)vram_node->start;
- return 0;
+ context->flush_seq++;
}
+
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ mapping->use = 1;
+
+ mutex_unlock(&context->lock);
+
+ return 0;
}
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ struct drm_mm_node *node = &mapping->vram_node;
- if (mmu->version == ETNAVIV_IOMMU_V2) {
- mutex_lock(&mmu->lock);
- etnaviv_domain_unmap(mmu->domain, iova, size);
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ mutex_lock(&context->lock);
+ mapping->use--;
+
+ if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+ mutex_unlock(&context->lock);
+ return;
}
+
+ etnaviv_context_unmap(context, node->start, node->size);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
+{
+ return context->global->ops->dump_size(context);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
+{
+ context->global->ops->dump(context, buf);
}
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
- return iommu->domain->ops->dump_size(iommu->domain);
+ enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global;
+ struct device *dev = gpu->drm->dev;
+
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+ version = ETNAVIV_IOMMU_V2;
+
+ if (priv->mmu_global) {
+ if (priv->mmu_global->version != version) {
+ dev_err(gpu->dev,
+ "MMU version doesn't match global version\n");
+ return -ENXIO;
+ }
+
+ priv->mmu_global->use++;
+ return 0;
+ }
+
+ global = kzalloc(sizeof(*global), GFP_KERNEL);
+ if (!global)
+ return -ENOMEM;
+
+ global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+ GFP_KERNEL);
+ if (!global->bad_page_cpu)
+ goto free_global;
+
+ memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+ if (version == ETNAVIV_IOMMU_V2) {
+ global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+ &global->v2.pta_dma, GFP_KERNEL);
+ if (!global->v2.pta_cpu)
+ goto free_bad_page;
+ }
+
+ global->dev = dev;
+ global->version = version;
+ global->use = 1;
+ mutex_init(&global->lock);
+
+ if (version == ETNAVIV_IOMMU_V1)
+ global->ops = &etnaviv_iommuv1_ops;
+ else
+ global->ops = &etnaviv_iommuv2_ops;
+
+ priv->mmu_global = global;
+
+ return 0;
+
+free_bad_page:
+ dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+ kfree(global);
+
+ return -ENOMEM;
}
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
- iommu->domain->ops->dump(iommu->domain, buf);
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global = priv->mmu_global;
+
+ if (--global->use > 0)
+ return;
+
+ if (global->v2.pta_cpu)
+ dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+ global->v2.pta_cpu, global->v2.pta_dma);
+
+ if (global->bad_page_cpu)
+ dma_free_wc(global->dev, SZ_4K,
+ global->bad_page_cpu, global->bad_page_dma);
+
+ mutex_destroy(&global->lock);
+ kfree(global);
+
+ priv->mmu_global = NULL;
}