Diffstat:
 drivers/gpu/drm/nouveau/nouveau_bo.c | 149 ++++++++++++++++----------
 1 file changed, 80 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..da5d631aa5b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,13 +27,9 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
-#include <core/engine.h>
+#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_fb_tile *tile = &pfb->tile.region[i];
struct nouveau_engine *engine;
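
Note: drm->device is now an embedded struct nvif_device rather than a pointer to the core device, so subsystem lookups take its address, e.g. nvkm_fb(&drm->device) instead of nouveau_fb(drm->device). The same substitution repeats throughout the patch.
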
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
- if (device->card_type < NV_50) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->tile_mode) {
- if (device->chipset >= 0x40) {
+ if (device->info.chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x30) {
+ } else if (device->info.chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x20) {
+ } else if (device->info.chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x10) {
+ } else if (device->info.chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
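
Note: raw card_type thresholds become the versioned device-info family constants exposed through nvif (NV_50 -> NV_DEVICE_INFO_V0_TESLA here; NV_10/NV_11 -> NV_DEVICE_INFO_V0_CELSIUS in a later hunk), and the chipset id is now read from device->info.chipset.
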
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
int lpg_shift = 12;
int max_size;
- if (drm->client.base.vm)
- lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ if (drm->client.vm)
+ lpg_shift = drm->client.vm->vmm->lpg_shift;
max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
- nv_warn(drm, "skipped size %x\n", (u32)size);
+ NV_WARN(drm, "skipped size %x\n", (u32)size);
return -EINVAL;
}
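
Note: max_size rounds INT_MAX down to a whole number of large pages, so a buffer size can later be padded to large-page granularity without overflowing an int. A worked example, assuming 128 KiB large pages (lpg_shift = 17 is an assumed value, not from this hunk):

	int lpg_shift = 17;	/* assumed: 128 KiB large pages */
	int max_size = INT_MAX & ~((1 << lpg_shift) - 1);
	/* (1 << 17) - 1 == 0x1ffff, so
	 * max_size == 0x7fffffff & ~0x1ffff == 0x7ffe0000 */
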
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
- if (drm->client.base.vm) {
+ if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+ nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -261,11 +257,9 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
+ u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
- if ((nv_device(drm->device)->card_type == NV_10 ||
- nv_device(drm->device)->card_type == NV_11) &&
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -309,7 +303,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
goto out;
@@ -350,7 +344,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
return ret;
@@ -385,7 +379,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
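
Note: the three ttm_bo_reserve() hunks above are one and the same fix: the final parameter is a pointer (the ww_acquire_ctx ticket), so the bare 0 becomes NULL. Around this kernel era the signature was roughly:

	int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible,
			   bool no_wait, bool use_ticket,
			   struct ww_acquire_ctx *ticket);
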
@@ -500,21 +494,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- if (nv_device(drm->device)->card_type >= NV_50) {
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ /* Some BARs do not support being ioremapped WC */
+ if (nvkm_bar(&drm->device)->iomap_uncached) {
+ man->available_caching = TTM_PL_FLAG_UNCACHED;
+ man->default_caching = TTM_PL_FLAG_UNCACHED;
+ }
+
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
} else {
man->func = &ttm_bo_manager_func;
}
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
break;
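
Note: the caching setup (WC default, UNCACHED also available) moves ahead of the family check so the Tesla+ branch can override it: on boards whose BAR cannot be ioremapped write-combined (nvkm_bar()->iomap_uncached), both the available and default caching are forced to UNCACHED.
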
case TTM_PL_TT:
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
if (drm->agp.stat != ENABLED)
@@ -763,9 +764,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, NvDmaFB);
- OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, chan->drm->ntfy.handle);
+ OUT_RING (chan, chan->vram.handle);
+ OUT_RING (chan, chan->vram.handle);
}
return ret;
@@ -852,7 +853,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->drm->ntfy.handle);
}
return ret;
@@ -864,7 +865,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
{
if (mem->mem_type == TTM_PL_TT)
return NvDmaTT;
- return NvDmaFB;
+ return chan->vram.handle;
}
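
Note: the fixed ctxdma names NvNotify0 and NvDmaFB give way to handles of objects the channel itself owns (chan->drm->ntfy.handle for the notifier, chan->vram.handle for framebuffer memory), which is why nouveau_bo_mem_ctxdma() now needs the channel to resolve the VRAM case.
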
static int
@@ -922,12 +923,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[1]);
if (ret) {
nouveau_vm_put(&old_node->vma[0]);
@@ -945,6 +946,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_fence *fence;
int ret;
@@ -952,13 +954,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
return ret;
}
- mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(bo->sync_obj, chan);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
@@ -973,7 +975,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
}
}
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
return ret;
}
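
Note: with chan->cli gone, the client is recovered from the channel's device: nvif_client(&chan->device->base) walks up to the owning nvif_client, and the (void *) cast back to nouveau_cli relies on that client being embedded at the start of the structure. A sketch of the assumed layout:

	/* assumed layout; the embedded base being the first member is what
	 * makes the cast from nvif_client * to nouveau_cli * valid */
	struct nouveau_cli {
		struct nvif_client base;
		struct mutex mutex;
		/* ... */
	};
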
@@ -1005,9 +1007,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
int ret;
do {
- struct nouveau_object *object;
struct nouveau_channel *chan;
- u32 handle = (mthd->engine << 16) | mthd->oclass;
if (mthd->engine)
chan = drm->cechan;
@@ -1016,13 +1016,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
if (chan == NULL)
continue;
- ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
- mthd->oclass, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL,
+ mthd->oclass | (mthd->engine << 16),
+ mthd->oclass, NULL, 0,
+ &drm->ttm.copy);
if (ret == 0) {
- ret = mthd->init(chan, handle);
+ ret = mthd->init(chan, drm->ttm.copy.handle);
if (ret) {
- nouveau_object_del(nv_object(drm),
- chan->handle, handle);
+ nvif_object_fini(&drm->ttm.copy);
continue;
}
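
Note: object creation switches to the nvif API. The old explicit handle survives as the nvif object handle, composed the same way, and the object is now tracked in drm->ttm.copy so that a failed per-class init hook can tear it down with nvif_object_fini(). For illustration (class value assumed, not from this hunk):

	u32 handle = mthd->oclass | (mthd->engine << 16);
	/* e.g. oclass 0x5039 with engine 0 gives handle 0x00005039 */
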
@@ -1135,7 +1136,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -1166,7 +1167,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@@ -1203,7 +1204,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1249,16 +1250,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
+ mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
mem->bus.is_iomem = true;
- if (nv_device(drm->device)->card_type >= NV_50) {
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
@@ -1278,7 +1279,7 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
struct nouveau_mem *node = mem->mm_node;
if (!node->bar_vma.node)
@@ -1292,15 +1293,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_device *device = nv_device(drm->device);
- u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
+ struct nvif_device *device = &drm->device;
+ u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (nv_device(drm->device)->card_type < NV_50 ||
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nouveau_bo_tile_layout(nvbo))
return 0;
@@ -1315,7 +1316,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
/* make sure bo is in mappable vram */
- if (nv_device(drm->device)->card_type >= NV_50 ||
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->mem.start + bo->mem.num_pages < mappable)
return 0;
@@ -1333,6 +1334,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1349,8 +1351,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1370,17 +1373,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = nv_device_map_page(device,
- ttm->pages[i]);
- if (!ttm_dma->dma_address[i]) {
+ dma_addr_t addr;
+
+ addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(pdev, addr)) {
while (--i) {
- nv_device_unmap_page(device,
- ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
+
+ ttm_dma->dma_address[i] = addr;
}
return 0;
}
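
Note: this hunk replaces the nouveau-private nv_device_map_page() with the generic DMA API. A minimal sketch of the same map-with-unwind pattern, under assumed names (dev, pages, addrs and map_pages are illustrative, not from this file):

	/* Map each page for bidirectional DMA; on failure, unwind every
	 * mapping made so far and report the error. */
	static int map_pages(struct device *dev, struct page **pages,
			     dma_addr_t *addrs, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
						DMA_BIDIRECTIONAL);
			if (dma_mapping_error(dev, addrs[i])) {
				while (i--)	/* i--, not --i: unmaps index 0 too */
					dma_unmap_page(dev, addrs[i], PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
				return -EFAULT;
			}
		}
		return 0;
	}

The committed hunk unwinds with the pre-existing `while (--i)`, which stops before index 0, so the first page's mapping appears to be leaked on error; the patch keeps that behaviour unchanged.
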
@@ -1392,6 +1400,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1399,8 +1408,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1418,7 +1428,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
}
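
Note: teardown mirrors the populate path: a zero dma_address[] entry means "never mapped" (the populate error path clears entries as it unwinds), so unpopulate only calls dma_unmap_page() on non-zero addresses.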