Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_ttm.c')
 -rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 66
 1 file changed, 33 insertions(+), 33 deletions(-)
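
For reference, the hunks below apply a mechanical rename of core types and accessors from the old nouveau_* naming to the nvkm_*/nvxx_* namespace. The mapping, as it appears in this diff (roughly; only identifiers visible in the hunks are listed):

    struct nouveau_fb          -> struct nvkm_fb        (accessor nvkm_fb()    -> nvxx_fb())
    struct nouveau_mem         -> struct nvkm_mem
    struct nouveau_mm(_node)   -> struct nvkm_mm(_node)
    struct nouveau_vm          -> struct nvkm_vm        (nouveau_vm_get/put/unmap/ref -> nvkm_vm_*)
    struct nouveau_vmmgr       -> struct nvkm_mmu       (accessor nvkm_vmmgr() -> nvxx_mmu())
    struct nv04_vmmgr_priv     -> struct nv04_mmu_priv
    nvkm_device()              -> nvxx_device()
    core/subdev/vm/nv04.h      -> subdev/mmu/nv04.h
    nouveau_mem_node_cleanup() -> nvkm_mem_node_cleanup()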
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 3d1cfcb96b6b..273e50110ec3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,7 +33,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
man->priv = pfb;
return 0;
}
@@ -46,16 +46,16 @@ nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
}
static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
+nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
if (node->vma[0].node) {
- nouveau_vm_unmap(&node->vma[0]);
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_unmap(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
}
if (node->vma[1].node) {
- nouveau_vm_unmap(&node->vma[1]);
- nouveau_vm_put(&node->vma[1]);
+ nvkm_vm_unmap(&node->vma[1]);
+ nvkm_vm_put(&node->vma[1]);
}
}
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
- nouveau_mem_node_cleanup(mem->mm_node);
- pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ nvkm_mem_node_cleanup(mem->mm_node);
+ pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}
static int
@@ -76,9 +76,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nvkm_fb(&drm->device);
+ struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
u32 size_nc = 0;
int ret;
@@ -103,9 +103,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
- struct nouveau_fb *pfb = man->priv;
- struct nouveau_mm *mm = &pfb->vram;
- struct nouveau_mm_node *r;
+ struct nvkm_fb *pfb = man->priv;
+ struct nvkm_mm *mm = &pfb->vram;
+ struct nvkm_mm_node *r;
u32 total = 0, free = 0;
mutex_lock(&nv_subdev(pfb)->mutex);
@@ -150,7 +150,7 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- nouveau_mem_node_cleanup(mem->mm_node);
+ nvkm_mem_node_cleanup(mem->mm_node);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -163,7 +163,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
@@ -203,15 +203,15 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
};
/*XXX*/
-#include <core/subdev/vm/nv04.h>
+#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
- struct nv04_vmmgr_priv *priv = (void *)vmm;
- struct nouveau_vm *vm = NULL;
- nouveau_vm_ref(priv->vm, &vm, NULL);
+ struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
+ struct nv04_mmu_priv *priv = (void *)mmu;
+ struct nvkm_vm *vm = NULL;
+ nvkm_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
return 0;
}
@@ -219,8 +219,8 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
- struct nouveau_vm *vm = man->priv;
- nouveau_vm_ref(NULL, &vm, NULL);
+ struct nvkm_vm *vm = man->priv;
+ nvkm_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}
@@ -228,9 +228,9 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nvkm_mem *node = mem->mm_node;
if (node->vma[0].node)
- nouveau_vm_put(&node->vma[0]);
+ nvkm_vm_put(&node->vma[0]);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
@@ -241,7 +241,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node;
+ struct nvkm_mem *node;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -250,8 +250,8 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
- NV_MEM_ACCESS_RW, &node->vma[0]);
+ ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+ NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;
@@ -354,8 +354,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
u32 bits;
int ret;
- bits = nvkm_vmmgr(&drm->device)->dma_bits;
- if (nv_device_is_pci(nvkm_device(&drm->device))) {
+ bits = nvxx_mmu(&drm->device)->dma_bits;
+ if (nv_device_is_pci(nvxx_device(&drm->device))) {
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
@@ -396,12 +396,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
- nv_device_resource_len(nvkm_device(&drm->device), 1));
+ drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
+ nv_device_resource_len(nvxx_device(&drm->device), 1));
/* GART init */
if (drm->agp.stat != ENABLED) {
- drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
+ drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
}