author		Ben Skeggs <bskeggs@redhat.com>	2017-11-01 03:56:19 +1000
committer	Ben Skeggs <bskeggs@redhat.com>	2017-11-02 13:32:19 +1000
commit		af515ec8d3fbd8376513eee9648a52d5ab92bbac (patch)
tree		3a1a79d68c329521a8c43349254ecef4565e90b8 /drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
parent		drm/nouveau/imem/nv50: split object map out from api functions (diff)
drm/nouveau/imem/nv50: move slow-path locking into rd/wr functions
This is to simplify upcoming changes.

The slow-path is something that currently occurs during bootstrap of the BAR2 VMM, while backing up an object during suspend/resume, or when BAR2 address space runs out.

The latter is a real problem that can happen at runtime, and occurs in Fedora 26 already (due to some change that causes a lot of channels to be created at login), so ideally we'd prefer not to make it any slower. We'd also like suspend/resume speed not to suffer.

Upcoming commits will solve those problems in a better way, making the extra overhead of moving the locking here a non-issue.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
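For illustration, the pattern the patch moves to can be sketched as follows. Before this commit, nv50_instobj_acquire() took imem->lock and stashed the IRQ flags in the nv50_instmem structure for nv50_instobj_release() to restore; afterwards, each slow-path accessor takes imem->base.lock around its own window switch and MMIO access, keeping the flags on its own stack. This is a minimal sketch only, in kernel-style C with hypothetical demo_* names and a trivialized register access standing in for nvkm_rd32()/nvkm_wr32():

/*
 * Sketch of per-access slow-path locking; demo_* names are hypothetical
 * stand-ins for struct nv50_instmem and friends, and the indirect window
 * is modelled as a plain field rather than the real 0x001700 register.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_imem {
	spinlock_t lock;	/* plays the role of imem->base.lock */
	u64 addr;		/* base currently visible through the window */
};

static u32 demo_rd32_slow(struct demo_imem *imem, u64 base, u64 addr)
{
	unsigned long flags;
	u32 data;

	/* The lock is taken and dropped entirely inside the accessor: the
	 * IRQ flags live on this stack frame instead of in shared state,
	 * and nothing is held between acquire() and release() any more. */
	spin_lock_irqsave(&imem->lock, flags);
	if (imem->addr != base)
		imem->addr = base;	/* stand-in for re-pointing the window */
	data = (u32)(base + addr);	/* stand-in for the real MMIO read */
	spin_unlock_irqrestore(&imem->lock, flags);
	return data;
}

The cost is one lock/unlock per slow-path access rather than one per acquire/release pair, which, per the message above, upcoming commits make a non-issue.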
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 95b2c560fe4b..d0159d5876f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -31,8 +31,6 @@
 struct nv50_instmem {
 	struct nvkm_instmem base;
-	unsigned long lock_flags;
-	spinlock_t lock;
 	u64 addr;
 };
@@ -57,12 +55,15 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
 	struct nvkm_device *device = imem->base.subdev.device;
 	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&imem->base.lock, flags);
 	if (unlikely(imem->addr != base)) {
 		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
 	nvkm_wr32(device, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&imem->base.lock, flags);
 }
 
 static u32
@@ -74,12 +75,15 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
 	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
+	unsigned long flags;
 
+	spin_lock_irqsave(&imem->base.lock, flags);
 	if (unlikely(imem->addr != base)) {
 		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
 	data = nvkm_rd32(device, 0x700000 + addr);
+	spin_unlock_irqrestore(&imem->base.lock, flags);
 	return data;
 }
@@ -127,8 +131,6 @@ nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 static void
 nv50_instobj_release(struct nvkm_memory *memory)
 {
-	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
-	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
 }
 
 static void __iomem *
@@ -137,15 +139,12 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	struct nv50_instmem *imem = iobj->imem;
 	struct nvkm_vm *vm;
-	unsigned long flags;
 
 	if (!iobj->map && (vm = nvkm_bar_bar2_vmm(imem->base.subdev.device)))
 		nv50_instobj_kmap(iobj, vm);
 	if (!IS_ERR_OR_NULL(iobj->map))
 		return iobj->map;
 
-	spin_lock_irqsave(&imem->lock, flags);
-	imem->lock_flags = flags;
 	return NULL;
 }
@@ -254,7 +253,6 @@ nv50_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
 	*pimem = &imem->base;
 	return 0;
 }