path: root/drivers/gpu/drm/ttm/ttm_bo_util.c
author     Christian König <christian.koenig@amd.com>  2020-09-30 16:44:16 +0200
committer  Christian König <christian.koenig@amd.com>  2020-10-15 12:51:35 +0200
commit     ce65b874001d756294e0b7cf06c51137af964f38 (patch)
tree       609d12c729903bc175c665ce3cd39ef9b690c234 /drivers/gpu/drm/ttm/ttm_bo_util.c
parent     drm/ttm: use caching instead of placement for ttm_io_prot (diff)
drm/ttm: nuke caching placement flags
Changing the caching on the fly never really worked flawlessly. So stop
this completely and just let drivers specify the desired caching in the
tt or bus object.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/394256/
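For context, the model this patch completes: instead of toggling TTM_PL_FLAG_WC/TTM_PL_FLAG_CACHED placement bits at move time, a driver now picks one of the ttm_caching modes (ttm_uncached, ttm_write_combined, ttm_cached) once, when it creates the ttm_tt. Below is a minimal sketch of a driver-side ttm_tt_create backend, assuming the ttm_tt_init() signature with a caching parameter introduced earlier in this series; my_ttm_tt_create and the write-combined choice are illustrative, not from the patch:

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical ttm_tt backend: the caching mode is fixed at creation
 * time instead of being changed on the fly during buffer moves. */
static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
                                       uint32_t page_flags)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        /* e.g. write-combined for buffers the CPU mostly writes */
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}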
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0542097dc419..ba7ab5ed85d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -72,10 +72,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                 old_mem->mem_type = TTM_PL_SYSTEM;
         }
 
-        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
-        if (unlikely(ret != 0))
-                return ret;
-
         if (new_mem->mem_type != TTM_PL_SYSTEM) {
 
                 ret = ttm_tt_populate(bo->bdev, ttm, ctx);
@@ -135,7 +131,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
         } else {
                 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 
-                if (mem->placement & TTM_PL_FLAG_WC)
+                if (mem->bus.caching == ttm_write_combined)
                         addr = ioremap_wc(mem->bus.offset, bus_size);
                 else
                         addr = ioremap(mem->bus.offset, bus_size);
@@ -427,7 +423,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
         } else {
                 map->bo_kmap_type = ttm_bo_map_iomap;
-                if (mem->placement & TTM_PL_FLAG_WC)
+                if (mem->bus.caching == ttm_write_combined)
                         map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                   size);
                 else
@@ -457,7 +453,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
         if (ret)
                 return ret;
 
-        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+        if (num_pages == 1 && ttm->caching == ttm_cached) {
                 /*
                  * We're mapping a single page, and the desired
                  * page protection is consistent with the bo.
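The bus.caching value tested in the ioremap paths above is filled in by the driver, typically from its io_mem_reserve callback. A minimal sketch under that assumption; my_io_mem_reserve, my_dev and aper_base are hypothetical names, while the callback shape follows struct ttm_bo_driver at this point in the series:

/* Hypothetical io_mem_reserve: tell TTM where the aperture is and how
 * it should be mapped, including the caching mode for ioremap*(). */
static int my_io_mem_reserve(struct ttm_bo_device *bdev,
                             struct ttm_resource *mem)
{
        struct my_dev *mdev = container_of(bdev, struct my_dev, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case TTM_PL_TT:
                return 0;       /* not iomem; the ttm_tt caching applies */
        case TTM_PL_VRAM:
                /* bus.offset is the physical address handed to ioremap*() */
                mem->bus.offset = (mem->start << PAGE_SHIFT) + mdev->aper_base;
                mem->bus.is_iomem = true;
                /* VRAM apertures are typically mapped write-combined */
                mem->bus.caching = ttm_write_combined;
                return 0;
        default:
                return -EINVAL;
        }
}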