author    Ben Skeggs <bskeggs@redhat.com>  2020-12-02 16:40:28 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2021-02-11 10:14:09 +1000
commit    e5bf9a5ce5da32792a4dc1eafebe89d349cbbf27
tree      175b6082a6f6d3a80a4f6e0540145b9e6b19eeca
parent    drm/nouveau/fb: protect vram mm with private mutex
drm/nouveau/instmem: protect mm/lru with private mutex
nvkm_subdev.mutex is going away.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
 drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h |  5 +++++
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c    |  7 +++++--
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c    | 11 +++++------
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c    | 11 +++++------
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c    | 30 +++++++++++++++---------------
 5 files changed, 35 insertions(+), 29 deletions(-)
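The whole patch applies one pattern: state that was serialized by the shared
nvkm_subdev.mutex gets a dedicated lock declared next to the data it guards,
so instmem no longer contends with unrelated users of the subdev lock. A
minimal sketch of the shape (simplified fields, not the full nouveau struct):

    /* Sketch only: the real declaration is in instmem.h below. */
    struct nvkm_instmem {
        struct nvkm_subdev subdev;  /* subdev.mutex is being retired */
        struct mutex mutex;         /* new: private lock for heap/LRU state */
        /* ... allocator heap, LRU list, instobj list ... */
    };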
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index c74ab7c31d05..4090d4795f74 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -13,6 +13,11 @@ struct nvkm_instmem {
struct list_head boot;
u32 reserved;
+ /* <=nv4x: protects NV_PRAMIN/BAR2 MM
+ * >=nv50: protects BAR2 MM & LRU
+ */
+ struct mutex mutex;
+
struct nvkm_memory *vbios;
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 364ea4492acc..33566566435a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -218,9 +218,11 @@ static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ void *data = imem;
if (imem->func->dtor)
- return imem->func->dtor(imem);
- return imem;
+ data = imem->func->dtor(imem);
+ mutex_destroy(&imem->mutex);
+ return data;
}
static const struct nvkm_subdev_func
@@ -241,4 +243,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
INIT_LIST_HEAD(&imem->boot);
+ mutex_init(&imem->mutex);
}
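Note the reshuffle in nvkm_instmem_dtor(): the chipset-specific dtor used to
be a tail return, but mutex_destroy() has to run after that teardown and
before the object is freed, so its return value is captured first. The
lifecycle pairing as a sketch, using a hypothetical struct obj rather than
the tree's types:

    /* Sketch: a mutex embedded in an object is set up in the constructor
     * and torn down as the destructor's final act, after any subclass
     * teardown that might still take the lock.
     */
    static void
    obj_ctor(struct obj *o)
    {
        mutex_init(&o->mutex);
    }

    static void *
    obj_dtor(struct obj *o)
    {
        void *data = o;

        if (o->func->dtor)
            data = o->func->dtor(o);    /* may still use o->mutex */
        mutex_destroy(&o->mutex);       /* last touch of the object */
        return data;
    }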
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6bf0dad46919..0133f98467bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -99,9 +99,9 @@ static void *
nv04_instobj_dtor(struct nvkm_memory *memory)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
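On <=nv4x the new mutex guards the NV_PRAMIN heap, and every nvkm_mm_head()/
nvkm_mm_free() call is simply bracketed by it, since nvkm_mm does no locking
of its own (nv40.c below is the identical change). A hypothetical helper,
not in the tree, showing the invariant the patch establishes:

    /* Hypothetical wrapper (illustration only): the heap is only ever
     * touched while holding imem->base.mutex.
     */
    static int
    instmem_heap_alloc(struct nv04_instmem *imem, u32 size, u32 align,
                       struct nvkm_mm_node **pnode)
    {
        int ret;

        mutex_lock(&imem->base.mutex);
        ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
                           align ? align : 1, pnode);
        mutex_unlock(&imem->base.mutex);
        return ret;
    }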
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 086c118488ef..3df5bb169258 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -99,9 +99,9 @@ static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 02c4eb28cef4..44c567036c2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
* into it. The lock has to be dropped while doing this due
* to the possibility of recursion for page table allocation.
*/
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
/* Evict unused mappings, and keep retrying until we either
succeed, or there's no more objects left on the LRU.
*/
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
if (eobj) {
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
emap = eobj->map;
eobj->map = NULL;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
if (!eobj)
break;
iounmap(emap);
@@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
if (ret == 0)
ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
nvkm_vmm_put(vmm, &bar);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
return;
}
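The nv50 path is where the conversion has to be careful: nv50_instobj_kmap()
enters with the lock held, but must drop it around nvkm_vmm_get(), because
allocating BAR2 page tables can recurse back into instmem. The eviction loop
as a skeleton, with the debug print elided:

    /* Entered with imem->base.mutex held; the lock is dropped around
     * nvkm_vmm_get() and re-taken afterwards, so the caller must
     * re-validate state (the "ret || iobj->bar" check above) once it
     * holds the lock again.
     */
    struct nv50_instobj *eobj;
    struct nvkm_vma *bar = NULL, *ebar;
    void __iomem *emap;

    mutex_unlock(&imem->base.mutex);
    while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
        mutex_lock(&imem->base.mutex);
        eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
        if (eobj) {
            /* Steal the victim's mapping under the lock... */
            list_del_init(&eobj->lru);
            ebar = eobj->bar;
            eobj->bar = NULL;
            emap = eobj->map;
            eobj->map = NULL;
        }
        mutex_unlock(&imem->base.mutex);
        if (!eobj)
            break;      /* LRU exhausted; loop exits with ret != 0 */
        /* ...but unmap it with the lock dropped. */
        iounmap(emap);
        nvkm_vmm_put(vmm, &ebar);
    }
    mutex_lock(&imem->base.mutex);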
@@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
wmb();
nvkm_bar_flush(subdev->device->bar);
- if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
/* Add the now-unused mapping to the LRU instead of directly
* unmapping it here, in case we need to map it again later.
*/
@@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
/* Switch back to NULL accessors when last map is gone. */
iobj->base.memory.ptrs = NULL;
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
}
}
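The release side uses refcount_dec_and_mutex_lock(), which drops the count
lock-free in the common case and only acquires the mutex (returning true)
when the count hits zero, so mapping teardown is serialized without taxing
every release. The idiom, condensed from the hunk above:

    /* The lock is only taken on the 1 -> 0 transition, where the
     * now-idle mapping is parked on the LRU for possible reuse
     * instead of being torn down.
     */
    if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
        if (likely(iobj->lru.next) && iobj->map)
            list_add_tail(&iobj->lru, &imem->lru);
        iobj->base.memory.ptrs = NULL;  /* back to NULL accessors */
        mutex_unlock(&imem->base.mutex);
    }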
@@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return iobj->map;
}
@@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
refcount_set(&iobj->maps, 1);
}
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return map;
}
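nv50_instobj_acquire() pairs that with the classic double-checked fast path:
refcount_inc_not_zero() first without the lock, then again under it, so only
the thread that actually has to build the mapping pays for the mutex.
Skeleton of the flow:

    /* Lock-free fast path, then a re-check under the mutex before
     * doing the expensive mapping work.
     */
    if (refcount_inc_not_zero(&iobj->maps))
        return iobj->map;               /* already mapped */

    mutex_lock(&imem->mutex);
    if (refcount_inc_not_zero(&iobj->maps)) {
        mutex_unlock(&imem->mutex);     /* lost the race; map exists */
        return iobj->map;
    }
    /* ... create the mapping, then publish it:
     *     refcount_set(&iobj->maps, 1);
     */
    mutex_unlock(&imem->mutex);
    return map;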
@@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
/* Exclude bootstrapped objects (ie. the page tables for the
* instmem BAR itself) from eviction.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
list_del_init(&iobj->lru);
iobj->lru.next = NULL;
@@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
nv50_instobj_kmap(iobj, vmm);
nvkm_instmem_boot(imem);
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
}
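nv50_instobj_boot() shows the other role of iobj->lru.next: after pulling the
object off the LRU it is cleared to NULL as a permanent "never evict" marker,
which is exactly what the likely(iobj->lru.next) tests in the release and
dtor paths check for. Condensed from the hunk above:

    /* Bootstrapped objects (BAR2's own page tables) must never be
     * evicted, so lru.next is used as a tombstone after removal.
     */
    mutex_lock(&imem->mutex);
    if (likely(iobj->lru.next)) {
        list_del_init(&iobj->lru);
        iobj->lru.next = NULL;      /* mark as pinned forever */
    }
    nv50_instobj_kmap(iobj, vmm);
    nvkm_instmem_boot(imem);
    mutex_unlock(&imem->mutex);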
static u64
@@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
struct nvkm_vma *bar;
void *map = map;
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
if (map) {
struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);