Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c         |  4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c         |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c         |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c            | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c          |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c     | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c       | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h       |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c           |  9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c         |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c         |  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c           | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c           | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.h           | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c           | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.h           |  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c           |  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c            |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c            |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c  |  6
20 files changed, 149 insertions(+), 173 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 1c9c0cdf85db..f949767698fc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1617,7 +1617,9 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
- max_payloads, conn_base_id);
+ max_payloads, outp->dcb->dpconf.link_nr,
+ drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
+ conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3..8d048bacd6f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0a9334deffe2..b45ec3086285 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
- if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+ if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index cf11febf60c0..4f3a5357dd56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -302,7 +302,6 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
- nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
@@ -364,12 +363,12 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
+ u64 vram_size = drm->client.device.info.ram_size;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 4) {
+ nvbo->bo.base.size < vram_size / 4) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
@@ -377,11 +376,11 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
* at the same time.
*/
if (nvbo->zeta) {
- fpfn = vram_pages / 2;
+ fpfn = (vram_size / 2) >> PAGE_SHIFT;
lpfn = ~0;
} else {
fpfn = 0;
- lpfn = vram_pages / 2;
+ lpfn = (vram_size / 2) >> PAGE_SHIFT;
}
for (i = 0; i < nvbo->placement.num_placement; ++i) {
nvbo->placements[i].fpfn = fpfn;
@@ -434,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (nvbo->bo.pin_count) {
bool error = evict;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
break;
@@ -448,7 +447,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- bo->mem.mem_type, domain);
+ bo->resource->mem_type, domain);
ret = -EBUSY;
}
ttm_bo_pin(&nvbo->bo);
@@ -469,7 +468,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
ttm_bo_pin(&nvbo->bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available -= bo->base.size;
break;
@@ -500,7 +499,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->base.size;
break;
@@ -525,7 +524,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -739,7 +738,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
NOUVEAU_GEM_DOMAIN_CPU);
@@ -756,7 +755,7 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_resource *reg)
{
- struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
struct nouveau_mem *new_mem = nouveau_mem(reg);
struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
@@ -811,7 +810,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
- ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
@@ -920,12 +919,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
}
}
- if (new_reg) {
- if (new_reg->mm_node)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
- else
- nvbo->offset = 0;
- }
+ if (new_reg)
+ nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
@@ -957,7 +952,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -971,7 +966,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_resource *old_reg = &bo->mem;
+ struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
@@ -1011,7 +1006,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
@@ -1047,20 +1042,11 @@ out:
}
out_ntfy:
if (ret) {
- nouveau_bo_move_ntfy(bo, &bo->mem);
+ nouveau_bo_move_ntfy(bo, bo->resource);
}
return ret;
}
-static int
-nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
- filp->private_data);
-}
-
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
struct ttm_resource *reg)
@@ -1181,7 +1167,7 @@ out:
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
- nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
goto retry;
}
@@ -1211,12 +1197,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
- if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (bo->resource->mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nvbo->kind)
return 0;
- if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ if (bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1224,7 +1210,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
+ bo->resource->start + bo->resource->num_pages < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
@@ -1333,7 +1319,6 @@ struct ttm_device_funcs nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
- .verify_access = nouveau_bo_verify_access,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
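
For reference, the repeated bo->mem to bo->resource conversions above reduce to the pattern below; nouveau_bo_in_vram() is a hypothetical helper used purely for illustration, not a function in the driver.

#include <linux/types.h>
#include <drm/ttm/ttm_placement.h>	/* TTM_PL_VRAM */

#include "nouveau_bo.h"			/* struct nouveau_bo */

/*
 * struct ttm_buffer_object no longer embeds its placement as bo->mem;
 * it carries a pointer to a separately allocated struct ttm_resource,
 * so every placement query goes through bo->resource instead.
 */
static inline bool nouveau_bo_in_vram(struct nouveau_bo *nvbo)
{
	return nvbo->bo.resource->mem_type == TTM_PL_VRAM;
}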
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7cfac265fd45..40362600eed2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+ if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index eb844cdcaec2..22b83a6577eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -402,7 +402,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
- drm_dp_aux_unregister(&nv_connector->aux);
kfree(nv_connector->aux.name);
}
kfree(connector);
@@ -462,7 +461,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
- struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct device_node *cn, *dn = pci_device_to_OF_node(pdev);
if (!dn ||
!((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
@@ -906,13 +906,29 @@ nouveau_connector_late_register(struct drm_connector *connector)
int ret;
ret = nouveau_backlight_init(connector);
+ if (ret)
+ return ret;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_aux_register(&nouveau_connector(connector)->aux);
+ if (ret)
+ goto backlight_fini;
+ }
+
+ return 0;
+backlight_fini:
+ nouveau_backlight_fini(connector);
return ret;
}
static void
nouveau_connector_early_unregister(struct drm_connector *connector)
{
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+ drm_dp_aux_unregister(&nouveau_connector(connector)->aux);
+
nouveau_backlight_fini(connector);
}
@@ -1340,18 +1356,19 @@ nouveau_connector_create(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
+ nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
- ret = drm_dp_aux_register(&nv_connector->aux);
+ drm_dp_aux_init(&nv_connector->aux);
if (ret) {
- NV_ERROR(drm, "failed to register aux channel\n");
+ NV_ERROR(drm, "Failed to init AUX adapter for sor-%04x-%04x: %d\n",
+ dcbe->hasht, dcbe->hashm, ret);
kfree(nv_connector);
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs;
- break;
+ fallthrough;
default:
funcs = &nouveau_connector_funcs;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dac02c7be54d..929de41c281f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -697,7 +697,6 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
@@ -838,21 +837,3 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
drm_gem_object_put(&bo->bo.base);
return ret;
}
-
-int
-nouveau_display_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *poffset)
-{
- struct drm_gem_object *gem;
-
- gem = drm_gem_object_lookup(file_priv, handle);
- if (gem) {
- struct nouveau_bo *bo = nouveau_gem_object(gem);
- *poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
- drm_gem_object_put(gem);
- return 0;
- }
-
- return -ENOENT;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 616c43427059..2ab2ddb1eadf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -58,8 +58,6 @@ bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
-int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
- u32 handle, u64 *offset);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 885815ea917f..a616cf4573b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -30,7 +30,9 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
@@ -736,7 +738,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
if (ret)
return ret;
@@ -1177,7 +1179,7 @@ nouveau_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = nouveau_drm_ioctl,
- .mmap = nouveau_ttm_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
@@ -1210,9 +1212,10 @@ driver_stub = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.dumb_create = nouveau_display_dumb_create,
- .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
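
For reference, a rough sketch of the mmap wiring this file switches to: the file_operations level now uses the generic GEM helper, which resolves the GEM object from the fake offset, installs obj->funcs->vm_ops on the VMA, and calls obj->funcs->mmap (drm_gem_ttm_mmap() for nouveau, added in nouveau_gem.c below). example_fops is illustrative only, not the driver's actual table.

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

/* No driver-wide mmap entry point (the removed nouveau_ttm_mmap()) is
 * needed anymore; drm_gem_mmap() dispatches per GEM object. */
static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.mmap		= drm_gem_mmap,
	.llseek		= noop_llseek,
};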
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4fc0fa696461..4f9b3aa5deda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -378,11 +378,11 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.offset;
- info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->fix.smem_start = nvbo->bo.resource->bus.offset;
+ info->fix.smem_len = nvbo->bo.base.size;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
- info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->screen_size = nvbo->bo.base.size;
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..6b43918035df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
return ret;
}
- fobj = dma_resv_get_list(resv);
- fence = dma_resv_get_excl(resv);
+ fobj = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
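
For reference, the dma_resv accessor renames exercised in this hunk and in the wndw.c, nouveau_bo.c and nouveau_gem.c hunks map as follows; the semantics are unchanged, only the names differ.

/*
 *   old accessor                        new accessor
 *   ---------------------------------------------------------------
 *   dma_resv_get_excl(resv)         ->  dma_resv_excl_fence(resv)
 *   dma_resv_get_excl_rcu(resv)     ->  dma_resv_get_excl_unlocked(resv)
 *   dma_resv_get_list(resv)         ->  dma_resv_shared_list(resv)
 *   dma_resv_wait_timeout_rcu(...)  ->  dma_resv_wait_timeout(...)
 */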
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c88cbb85f101..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -39,6 +39,40 @@
#include <nvif/class.h>
#include <nvif/push206e.h>
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ttm_fault_reserve_notify(bo);
+ if (ret)
+ goto error_unlock;
+
+ nouveau_bo_del_io_reserve_lru(bo);
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ nouveau_bo_add_io_reserve_lru(bo);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+error_unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static const struct vm_operations_struct nouveau_ttm_vm_ops = {
+ .fault = nouveau_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
@@ -180,6 +214,8 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.get_sg_table = nouveau_gem_prime_get_sg_table,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .vm_ops = &nouveau_ttm_vm_ops,
};
int
@@ -240,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -253,7 +289,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->offset = vma->addr;
}
- rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
@@ -311,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- bo->mem.mem_type == TTM_PL_VRAM)
+ bo->resource->mem_type == TTM_PL_VRAM)
pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
- bo->mem.mem_type == TTM_PL_TT)
+ bo->resource->mem_type == TTM_PL_TT)
pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -525,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ (nvbo->bo.resource->mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ if (nvbo->bo.resource->mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -638,14 +674,14 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
- nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+ nvbo->bo.base.size)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
if (!nvbo->kmap.virtual) {
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -834,7 +870,7 @@ revalidate:
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
- nvbo->bo.mem.
+ nvbo->bo.resource->
num_pages,
&nvbo->kmap);
if (ret) {
@@ -928,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
- no_wait ? 0 : 30 * HZ);
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee1..0de6549fb875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
nouveau_mem_del(struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- if (!mem)
- return;
+
nouveau_mem_fini(mem);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
+ kfree(mem);
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
+
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
- reg->mm_node = mem;
+ *res = &mem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 7df3848e85aa..2c01166a90f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,13 +6,8 @@ struct ttm_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
- return reg->mm_node;
-}
-
struct nouveau_mem {
+ struct ttm_resource base;
struct nouveau_cli *cli;
u8 kind;
u8 comp;
@@ -20,8 +15,14 @@ struct nouveau_mem {
struct nvif_vma vma[2];
};
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+ return container_of(reg, struct nouveau_mem, base);
+}
+
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
- struct ttm_resource *);
+ struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
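
For reference, a minimal sketch of the embedding pattern nouveau_mem adopts here; example_mem and to_example_mem() are illustrative stand-ins for struct nouveau_mem and nouveau_mem(), not driver symbols.

#include <linux/kernel.h>		/* container_of() */
#include <linux/types.h>
#include <drm/ttm/ttm_resource.h>	/* struct ttm_resource */

struct example_mem {
	struct ttm_resource base;	/* embedded, not pointed to */
	u8 kind;
};

/*
 * TTM hands back a struct ttm_resource *; the wrapper is recovered with
 * container_of() instead of the removed reg->mm_node back-pointer.
 */
static inline struct example_mem *to_example_mem(struct ttm_resource *res)
{
	return container_of(res, struct example_mem, base);
}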
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e8b506a6685b..f4c2e46b6fe1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -26,13 +26,13 @@
#include <linux/limits.h>
#include <linux/swiotlb.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
-#include <drm/drm_legacy.h>
-
#include <core/tegra.h>
static void
@@ -45,7 +45,7 @@ static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -54,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+ ttm_resource_init(bo, place, *res);
+
+ ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
@@ -76,17 +78,18 @@ static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- reg->start = 0;
+ ttm_resource_init(bo, place, *res);
+ (*res)->start = 0;
return 0;
}
@@ -99,26 +102,27 @@ static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
+ mem = nouveau_mem(*res);
+ ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
- reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+ (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
@@ -127,55 +131,6 @@ const struct ttm_resource_manager_func nv04_gart_manager = {
.free = nouveau_manager_del,
};
-static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = vma->vm_private_data;
- pgprot_t prot;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- ret = nouveau_ttm_fault_reserve_notify(bo);
- if (ret)
- goto error_unlock;
-
- nouveau_bo_del_io_reserve_lru(bo);
- prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
- nouveau_bo_add_io_reserve_lru(bo);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
-error_unlock:
- dma_resv_unlock(bo->base.resv);
- return ret;
-}
-
-static const struct vm_operations_struct nouveau_ttm_vm_ops = {
- .fault = nouveau_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
-};
-
-int
-nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
- int ret;
-
- ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
- if (ret)
- return ret;
-
- vma->vm_ops = &nouveau_ttm_vm_ops;
- return 0;
-}
-
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
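
For reference, the shape all three resource managers above now follow: the driver allocates the resource itself, initialises the common fields with ttm_resource_init(), and hands it back through the new out parameter. example_manager_new() is a sketch; the real implementations additionally back the resource with VRAM pages or a GPU virtual range.

#include <drm/ttm/ttm_resource.h>

#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_mem.h"

static int
example_manager_new(struct ttm_resource_manager *man,
		    struct ttm_buffer_object *bo,
		    const struct ttm_place *place,
		    struct ttm_resource **res)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	int ret;

	/* Allocate the driver wrapper; *res points at its embedded base. */
	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
	if (ret)
		return ret;

	/* The common ttm_resource fields are now initialised by the driver. */
	ttm_resource_init(bo, place, *res);

	/* ...back *res with VRAM pages or a GART/VMM range here... */
	return 0;
}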
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index dbf6dc238efd..2f0efda7ccdb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -17,7 +17,6 @@ struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
int nouveau_ttm_init(struct nouveau_drm *drm);
void nouveau_ttm_fini(struct nouveau_drm *drm);
-int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a49e88129c92..67d6619fcd5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -77,7 +77,7 @@ int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
struct nouveau_vma **pvma)
{
- struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
struct nouveau_vma *vma;
struct nvif_vma tmp;
int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
vma->fence = NULL;
list_add_tail(&vma->head, &nvbo->vma_list);
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
mem->mem.size, &tmp);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index b1cd8d7dd87d..07c2e0878c24 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -77,8 +77,8 @@ static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
+ struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 1625826505f6..ea1e1f480bfe 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
+ struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index 58db83ebadc5..a96084b34a78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
nvkm_wr32(device, 0xb81610, 0x6);
}
-void
+static void
tu102_mc_intr_unarm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_rearm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct tu102_mc *mc = tu102_mc(base);