author    Ralph Campbell <rcampbell@nvidia.com>  2020-06-30 12:57:35 -0700
committer Ben Skeggs <bskeggs@redhat.com>        2020-07-24 18:50:50 +1000
commit    7763d24f3ba0e5e30daf047cfa6e6bcfd9d7de75 (patch)
tree      635eef7b60a45bc1aed22e4c2a6240ebc707c18f /drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
parent    drm/nouveau/mmu: make nvkm_vmm_ctor() static (diff)
drm/nouveau/vmm/gp100-: fix mapping 2MB sysmem pages
The nvif_object_ioctl() method NVIF_VMM_V0_PFNMAP wasn't correctly setting
the hardware specific GPU page table entries for 2MB sized pages. Fix this
by adding functions to set and clear PD0 GPU page table entries.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
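
For context, below is a minimal standalone sketch of the page-size lookup that
the second hunk in the diff corrects. The names used here (page_size,
page_desc_funcs, find_pfn_page) are simplified stand-ins for illustration, not
the actual nouveau definitions.

/* Minimal sketch (not the actual nouveau code): walk a table of supported
 * page sizes until an entry both matches the requested shift AND provides a
 * pfn handler. The pre-fix code grouped the tests with '&&', so the walk
 * could stop on an entry that satisfied only one of the two conditions.
 */
#include <stddef.h>

struct page_desc_funcs {
	void (*pfn)(void);           /* non-NULL if direct pfn mapping exists */
};

struct page_size {
	unsigned char shift;         /* log2 of the page size; 0 terminates table */
	const struct page_desc_funcs *func;  /* assumed non-NULL for valid entries */
};

static const struct page_size *
find_pfn_page(const struct page_size *page, unsigned char shift)
{
	while (page->shift && (page->shift != shift || page->func->pfn == NULL))
		page++;
	return page->shift ? page : NULL;  /* NULL: no direct mapping at this size */
}

With the corrected grouping, the walk only stops on an entry whose shift
matches the incoming page array and which actually has a pfn handler, which is
what the new PD0 set/clear functions provide for 2MB pages.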
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c  5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 67b00dcef4b8..710f3f8dc7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1204,7 +1204,6 @@ nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
/*TODO:
* - Avoid PT readback (for dma_unmap etc), this might end up being dealt
* with inside HMM, which would be a lot nicer for us to deal with.
- * - Multiple page sizes (particularly for huge page support).
* - Support for systems without a 4KiB page size.
*/
int
@@ -1220,8 +1219,8 @@ nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
/* Only support mapping where the page size of the incoming page
* array matches a page size available for direct mapping.
*/
- while (page->shift && page->shift != shift &&
- page->desc->func->pfn == NULL)
+ while (page->shift && (page->shift != shift ||
+ page->desc->func->pfn == NULL))
page++;
if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||