author     Ben Skeggs <bskeggs@redhat.com>   2012-07-19 17:54:21 +1000
committer  Ben Skeggs <bskeggs@redhat.com>   2012-10-03 13:12:54 +1000
commit     bc9e7b9a61e9e92ddb58920cb2cb5c2e2825ca8a (patch)
tree       d7384189e54e2d8fa4630c05e2abc1ac7c7900e5 /drivers/gpu/drm/nouveau/nouveau_ttm.c
parent     drm/nvc0-nve0/graph: rename dev to priv, no code changes (diff)
drm/nouveau: move some more code around to more appropriate places
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
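
Note (not part of the patch): the nouveau_vram_manager, nouveau_gart_manager and nv04_gart_manager function tables introduced below only take effect once the driver hands them to TTM while initialising each memory type. A minimal sketch of that hookup, assuming the init_mem_type() callback and the man->func field of the TTM API from this period; the callback name and switch layout are illustrative and not taken from this commit:

/*
 * Illustrative sketch only: how a TTM driver of this era selects one of
 * the managers added by this patch, from its init_mem_type() callback in
 * struct ttm_bo_driver.  The callback name here is assumed for the example.
 */
static int
example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_VRAM:
                man->func = &nouveau_vram_manager;      /* added below */
                break;
        case TTM_PL_TT:
                man->func = &nouveau_gart_manager;      /* or &nv04_gart_manager */
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
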
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_ttm.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_ttm.c | 229
1 file changed, 228 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bd35f930568c..e729535e9b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,234 @@
#include "nouveau_drv.h"
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        /* nothing to do */
+        return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+        /* nothing to do */
+        return 0;
+}
+
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+        if (node->vma[0].node) {
+                nouveau_vm_unmap(&node->vma[0]);
+                nouveau_vm_put(&node->vma[0]);
+        }
+
+        if (node->vma[1].node) {
+                nouveau_vm_unmap(&node->vma[1]);
+                nouveau_vm_put(&node->vma[1]);
+        }
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+
+        nouveau_mem_node_cleanup(mem->mm_node);
+        nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_mem *node;
+        u32 size_nc = 0;
+        int ret;
+
+        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+                size_nc = 1 << nvbo->page_shift;
+
+        ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
+                            mem->page_alignment << PAGE_SHIFT, size_nc,
+                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
+        if (ret) {
+                mem->mm_node = NULL;
+                return (ret == -ENOSPC) ? 0 : ret;
+        }
+
+        node->page_shift = nvbo->page_shift;
+
+        mem->mm_node = node;
+        mem->start = node->offset >> PAGE_SHIFT;
+        return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+        struct nouveau_mm *mm = man->priv;
+        struct nouveau_mm_node *r;
+        u32 total = 0, free = 0;
+
+        mutex_lock(&mm->mutex);
+        list_for_each_entry(r, &mm->nodes, nl_entry) {
+                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+                       prefix, r->type, ((u64)r->offset << 12),
+                       (((u64)r->offset + r->length) << 12));
+
+                total += r->length;
+                if (!r->type)
+                        free += r->length;
+        }
+        mutex_unlock(&mm->mutex);
+
+        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+               prefix, (u64)total << 12, (u64)free << 12);
+        printk(KERN_DEBUG "%s block: 0x%08x\n",
+               prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+        nouveau_vram_manager_init,
+        nouveau_vram_manager_fini,
+        nouveau_vram_manager_new,
+        nouveau_vram_manager_del,
+        nouveau_vram_manager_debug
+};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+        return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem)
+{
+        nouveau_mem_node_cleanup(mem->mm_node);
+        kfree(mem->mm_node);
+        mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_mem *node;
+
+        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+                     dev_priv->gart_info.aper_size))
+                return -ENOMEM;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+        node->page_shift = 12;
+
+        mem->mm_node = node;
+        mem->start = 0;
+        return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+        nouveau_gart_manager_init,
+        nouveau_gart_manager_fini,
+        nouveau_gart_manager_new,
+        nouveau_gart_manager_del,
+        nouveau_gart_manager_debug
+};
+
+static int
+nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+        man->priv = nv04vm_ref(dev);
+        return (man->priv != NULL) ? 0 : -ENODEV;
+}
+
+static int
+nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+        struct nouveau_vm *vm = man->priv;
+        nouveau_vm_ref(NULL, &vm, NULL);
+        man->priv = NULL;
+        return 0;
+}
+
+static void
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+{
+        struct nouveau_mem *node = mem->mm_node;
+        if (node->vma[0].node)
+                nouveau_vm_put(&node->vma[0]);
+        kfree(mem->mm_node);
+        mem->mm_node = NULL;
+}
+
+static int
+nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+                      struct ttm_buffer_object *bo,
+                      struct ttm_placement *placement,
+                      struct ttm_mem_reg *mem)
+{
+        struct nouveau_mem *node;
+        int ret;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+
+        node->page_shift = 12;
+
+        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+                             NV_MEM_ACCESS_RW, &node->vma[0]);
+        if (ret) {
+                kfree(node);
+                return ret;
+        }
+
+        mem->mm_node = node;
+        mem->start = node->vma[0].offset >> PAGE_SHIFT;
+        return 0;
+}
+
+void
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nv04_gart_manager = {
+        nv04_gart_manager_init,
+        nv04_gart_manager_fini,
+        nv04_gart_manager_new,
+        nv04_gart_manager_del,
+        nv04_gart_manager_debug
+};
+
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -100,4 +328,3 @@ nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
dev_priv->ttm.mem_global_ref.release = NULL;
}
-