From 6c28aed6e5b7fa9538ad1a468e3dd5a94ebe5b19 Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Tue, 4 Aug 2020 12:56:10 +1000
Subject: drm/amdgfx/ttm: use wrapper to get ttm memory managers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König
Signed-off-by: Dave Airlie
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-38-airlied@gmail.com
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5ac7b5561475..ced418cba2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -449,7 +449,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-		man = &adev->mman.bdev.man[TTM_PL_TT];
+		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
		if (size < (man->size << PAGE_SHIFT))
			return true;

@@ -458,7 +458,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-		man = &adev->mman.bdev.man[TTM_PL_VRAM];
+		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
		if (size < (man->size << PAGE_SHIFT))
			return true;

--
cgit v1.2.3-59-g8ed1b


From 9de59bc201496f28bb8835c2bcbae3ddb186b548 Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Tue, 4 Aug 2020 12:56:31 +1000
Subject: drm/ttm: rename ttm_mem_type_manager -> ttm_resource_manager.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This name makes a lot more sense, since these are about managing
driver resources rather than just memory ranges.
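Taken together with the previous patch, the pattern is: look a manager up
through ttm_manager_type() rather than indexing bdev.man[] directly, and let
drivers embed the renamed structure in their own wrapper. A minimal sketch of
that shape, using illustrative names (my_gtt_mgr, my_gtt_size) that are not
part of the patches:

	/*
	 * Illustrative only: a driver-private wrapper embedding the renamed
	 * struct ttm_resource_manager, looked up via ttm_manager_type()
	 * instead of indexing bdev.man[] directly.
	 */
	struct my_gtt_mgr {
		struct ttm_resource_manager manager;
		struct drm_mm mm;
	};

	static inline struct my_gtt_mgr *
	to_my_gtt_mgr(struct ttm_resource_manager *man)
	{
		return container_of(man, struct my_gtt_mgr, manager);
	}

	static u64 my_gtt_size(struct ttm_bo_device *bdev)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, TTM_PL_TT);

		/* man->size is in pages, as in amdgpu_bo_validate_size() */
		return (u64)man->size << PAGE_SHIFT;
	}

The amdgpu, nouveau, radeon, qxl, vmwgfx and core TTM changes below are this
same substitution applied throughout.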
Acked-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-59-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 36 +++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 36 +++++++-------- drivers/gpu/drm/drm_gem_vram_helper.c | 4 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 44 +++++++++--------- drivers/gpu/drm/nouveau/nouveau_ttm.h | 6 +-- drivers/gpu/drm/qxl/qxl_ttm.c | 4 +- drivers/gpu/drm/radeon/radeon_gem.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +- drivers/gpu/drm/ttm/ttm_bo.c | 66 +++++++++++++-------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 26 +++++------ drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_range_manager.c | 28 ++++++------ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 20 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 26 +++++------ drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 26 +++++------ include/drm/ttm/ttm_bo_api.h | 6 +-- include/drm/ttm/ttm_bo_driver.h | 60 ++++++++++++------------ 23 files changed, 209 insertions(+), 209 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index ba4d11e8a960..e2b4d3fc601d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -517,7 +517,7 @@ out_put: uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return amdgpu_vram_mgr_usage(vram_man); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5ef7b3b7c9af..65b67c82a4b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -299,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. 
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e9de6f9538c0..b9050b7221d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -25,13 +25,13 @@ #include "amdgpu.h" struct amdgpu_gtt_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t available; }; -static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_gtt_mgr, manager); } @@ -54,7 +54,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", man->size * PAGE_SIZE); } @@ -72,7 +72,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_gtt_mgr_usage(man)); } @@ -82,7 +82,7 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO, amdgpu_mem_info_gtt_used_show, NULL); -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func; /** * amdgpu_gtt_mgr_init - init GTT manager and DRM MM * @@ -93,7 +93,7 @@ static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; */ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_gtt_mgr *mgr; uint64_t start, size; int ret; @@ -108,7 +108,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, gtt_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; @@ -128,7 +128,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) } ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -142,13 +142,13 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) */ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -159,7 +159,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) 
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total); device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); kfree(mgr); } @@ -186,7 +186,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) * * Dummy, allocate the node but no space for it yet. */ -static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -251,7 +251,7 @@ err_out: * * Free the allocated GTT again. */ -static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -274,7 +274,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); s64 result = man->size - atomic64_read(&mgr->available); @@ -282,7 +282,7 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) return (result > 0 ? result : 0) * PAGE_SIZE; } -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node; @@ -309,7 +309,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) * * Dump the table content using printk. */ -static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -323,7 +323,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_gtt_mgr_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { .get_node = amdgpu_gtt_mgr_new, .put_node = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 134cca1af744..fff9c013f337 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -616,9 +616,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - struct ttm_mem_type_manager *vram_man = + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - struct ttm_mem_type_manager *gtt_man = + struct ttm_resource_manager *gtt_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ced418cba2f7..ce98df5b0c21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -442,7 +442,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, unsigned long size, u32 domain) { - struct ttm_mem_type_manager *man = NULL; + struct ttm_resource_manager *man = NULL; /* * If 
GTT is part of requested domains the check must succeed to diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 555695854076..2fc0214d9a95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2012,7 +2012,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) */ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); uint64_t size; int r; @@ -2234,7 +2234,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = (uintptr_t)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index c01fdb3f0458..3db29ae1f802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,8 +73,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, @@ -86,8 +86,8 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, struct device *dev, enum dma_data_direction dir, struct sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 03a6248f0c4e..6f888a63f22d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -29,7 +29,7 @@ #include "atom.h" struct amdgpu_vram_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t usage; @@ -37,7 +37,7 @@ struct amdgpu_vram_mgr { struct amdgpu_device *adev; }; -static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_vram_mgr, manager); } @@ -89,7 +89,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, 
"%llu\n", amdgpu_vram_mgr_usage(man)); } @@ -107,7 +107,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } @@ -165,7 +165,7 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = { NULL }; -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func; /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -177,7 +177,7 @@ static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; */ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_vram_mgr *mgr; int ret; @@ -190,7 +190,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - ttm_mem_type_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); man->func = &amdgpu_vram_mgr_func; @@ -205,7 +205,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) DRM_ERROR("Failed to register sysfs\n"); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -219,13 +219,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) */ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -235,7 +235,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); kfree(mgr); } @@ -321,7 +321,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, * * Allocate VRAM for the given BO. */ -static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -441,7 +441,7 @@ error: * * Free the allocated VRAM again. */ -static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -575,7 +575,7 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, * * Returns how many bytes are used in this domain. 
*/ -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -589,7 +589,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -604,7 +604,7 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) * * Dump the table content using printk. */ -static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -618,7 +618,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_vram_mgr_vis_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .get_node = amdgpu_vram_mgr_new, .put_node = amdgpu_vram_mgr_del, .debug = amdgpu_vram_mgr_debug diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 2187787f397e..e3660d00987d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1075,10 +1075,10 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_vram_mm *vmm = node->minor->dev->vram_mm; - struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 84387c810540..78b5a87b9855 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -32,13 +32,13 @@ #include static void -nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) +nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg) { nouveau_mem_del(reg); } static int -nouveau_vram_manager_new(struct ttm_mem_type_manager *man, +nouveau_vram_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -63,13 +63,13 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_vram_manager = { +const struct ttm_resource_manager_func nouveau_vram_manager = { .get_node = nouveau_vram_manager_new, .put_node = nouveau_manager_del, }; static int -nouveau_gart_manager_new(struct ttm_mem_type_manager *man, +nouveau_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -86,13 +86,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_gart_manager = { +const struct ttm_resource_manager_func nouveau_gart_manager = { .get_node = nouveau_gart_manager_new, .put_node = nouveau_manager_del, }; static int -nv04_gart_manager_new(struct ttm_mem_type_manager *man, +nv04_gart_manager_new(struct 
ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -118,7 +118,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nv04_gart_manager = { +const struct ttm_resource_manager_func nv04_gart_manager = { .get_node = nv04_gart_manager_new, .put_node = nouveau_manager_del, }; @@ -160,7 +160,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { /* Some BARs do not support being ioremapped WC */ const u8 type = mmu->type[drm->ttm.type_vram].type; - struct ttm_mem_type_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); + struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); if (!man) return -ENOMEM; @@ -175,10 +175,10 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) man->func = &nouveau_vram_manager; man->use_io_reserve_lru = true; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } else { return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, @@ -191,12 +191,12 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) static void nouveau_ttm_fini_vram(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); kfree(man); } else @@ -206,10 +206,10 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) static int nouveau_ttm_init_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT; unsigned available_caching, default_caching; - const struct ttm_mem_type_manager_func *func = NULL; + const struct ttm_resource_manager_func *func = NULL; if (drm->agp.bridge) { available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; @@ -237,24 +237,24 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) man->available_caching = available_caching; man->default_caching = default_caching; man->use_tt = true; - ttm_mem_type_manager_init(man, size_pages); + ttm_resource_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } static void nouveau_ttm_fini_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA && drm->agp.bridge) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + 
ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); kfree(man); } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index 085280754b3e..eaf25461cd91 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -8,9 +8,9 @@ nouveau_bdev(struct ttm_bo_device *bd) return container_of(bd, struct nouveau_drm, ttm.bdev); } -extern const struct ttm_mem_type_manager_func nouveau_vram_manager; -extern const struct ttm_mem_type_manager_func nouveau_gart_manager; -extern const struct ttm_mem_type_manager_func nv04_gart_manager; +extern const struct ttm_resource_manager_func nouveau_vram_manager; +extern const struct ttm_resource_manager_func nouveau_gart_manager; +extern const struct ttm_resource_manager_func nv04_gart_manager; struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, u32 page_flags); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7b9f7a94332a..727049046014 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -275,10 +275,10 @@ void qxl_ttm_fini(struct qxl_device *qdev) static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; - struct ttm_mem_type_manager *man = (struct ttm_mem_type_manager *)node->info_ent->data; + struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data; struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } #endif diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3ec028dba739..7f5dfe04789e 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -224,7 +224,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 5f536de3986d..21a01737b1be 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -838,7 +838,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) * isn't running */ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!rdev->mman.initialized) return; @@ -897,7 +897,7 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = *(int*)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e0188250b6ec..ff68f25ddbd4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,7 +77,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, +void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p) { drm_printf(p, " 
use_type: %d\n", man->use_type); @@ -88,14 +88,14 @@ void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, if (man->func && man->func->debug) (*man->func->debug)(man, p); } -EXPORT_SYMBOL(ttm_mem_type_manager_debug); +EXPORT_SYMBOL(ttm_resource_manager_debug); static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); int i, ret, mem_type; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, @@ -108,7 +108,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); man = ttm_manager_type(bo->bdev, mem_type); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); } } @@ -148,7 +148,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!list_empty(&bo->lru)) return; @@ -223,7 +223,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -238,7 +238,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -272,8 +272,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); - struct ttm_mem_type_manager *new_man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; ret = ttm_mem_io_lock(old_man, true); @@ -551,7 +551,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -768,7 +768,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, } static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, struct ww_acquire_ctx *ticket) @@ -843,7 +843,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); mem->mm_node = NULL; if (!man->func || !man->func->get_node) @@ -854,7 +854,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); 
+ struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); if (!man->func || !man->func->put_node) return; @@ -869,7 +869,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); * Add the last move fence to the BO and reserve a new shared slot. */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, struct ttm_mem_reg *mem, bool no_wait_gpu) { @@ -909,7 +909,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; @@ -929,7 +929,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, +static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { @@ -954,7 +954,7 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } -static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, +static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, uint32_t mem_type, const struct ttm_place *place, uint32_t *masked_placement) @@ -991,7 +991,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, { struct ttm_bo_device *bdev = bo->bdev; uint32_t mem_type = TTM_PL_SYSTEM; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; uint32_t cur_flags = 0; int ret; @@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return ret; man = ttm_manager_type(bdev, mem_type); - if (!man || !ttm_mem_type_manager_used(man)) + if (!man || !ttm_resource_manager_used(man)) return -EBUSY; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) @@ -1047,7 +1047,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; ret = ttm_bo_mem_placement(bo, place, mem, ctx); if (ret == -EBUSY) @@ -1404,8 +1404,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man) +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -1447,12 +1447,12 @@ int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, return 0; } -EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); +EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { pr_err("Illegal memory manager memory type %u\n", mem_type); @@ -1464,11 +1464,11 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_mem_type_manager_force_list_clean(bdev, man); + return ttm_resource_manager_force_list_clean(bdev, man); } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_mem_type_manager_init(struct 
ttm_mem_type_manager *man, +void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size) { unsigned i; @@ -1483,7 +1483,7 @@ void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, INIT_LIST_HEAD(&man->lru[i]); man->move = NULL; } -EXPORT_SYMBOL(ttm_mem_type_manager_init); +EXPORT_SYMBOL(ttm_resource_manager_init); static void ttm_bo_global_kobj_release(struct kobject *kobj) { @@ -1550,10 +1550,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; unsigned i; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, TTM_PL_SYSTEM); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); @@ -1580,7 +1580,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) { - struct ttm_mem_type_manager *man = &bdev->sysman; + struct ttm_resource_manager *man = &bdev->sysman; /* * Initialize the system memory buffer type. @@ -1590,9 +1590,9 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } int ttm_bo_device_init(struct ttm_bo_device *bdev, @@ -1643,7 +1643,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); ttm_mem_io_lock(man, false); ttm_bo_unmap_virtual_locked(bo); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 879c8ded0cd8..8ef0de8e36c5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -91,7 +91,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) +int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible) { if (likely(!man->use_io_reserve_lru)) return 0; @@ -103,7 +103,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) return 0; } -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) +void ttm_mem_io_unlock(struct ttm_resource_manager *man) { if (likely(!man->use_io_reserve_lru)) return; @@ -111,7 +111,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) mutex_unlock(&man->io_reserve_mutex); } -static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) +static int ttm_mem_io_evict(struct ttm_resource_manager *man) { struct ttm_buffer_object *bo; @@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; if (mem->bus.io_reserved_count++) @@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, 
bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_mem_reg *mem = &bo->mem; int ret; @@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void **virtual) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; @@ -230,7 +230,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void *virtual) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, mem->mem_type); @@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg old_copy = *old_mem; @@ -570,7 +570,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -600,7 +600,7 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { struct ttm_buffer_object *bo = map->bo; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); if (!map->virtual) @@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_mem_reg *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_type_manager *from = ttm_manager_type(bdev, old_mem->mem_type); - struct ttm_mem_type_manager *to = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); + struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); int ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index db4e21d11967..ba2e8bd198ad 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -281,7 +281,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); /* diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 7fddc74b3827..df62177cd913 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -44,17 +44,17 @@ */ struct ttm_range_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static inline struct ttm_range_manager *to_range_manager(struct ttm_mem_type_manager *man) +static inline struct ttm_range_manager 
*to_range_manager(struct ttm_resource_manager *man) { return container_of(man, struct ttm_range_manager, manager); } -static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, +static int ttm_range_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -95,7 +95,7 @@ static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, return ret; } -static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, +static void ttm_range_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = to_range_manager(man); @@ -110,7 +110,7 @@ static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func ttm_range_manager_func; +static const struct ttm_resource_manager_func ttm_range_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, unsigned type, @@ -119,7 +119,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, bool use_tt, unsigned long p_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct ttm_range_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -133,13 +133,13 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, man->func = &ttm_range_manager_func; - ttm_mem_type_manager_init(man, p_size); + ttm_resource_manager_init(man, p_size); drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); ttm_set_driver_manager(bdev, type, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } EXPORT_SYMBOL(ttm_range_man_init); @@ -147,14 +147,14 @@ EXPORT_SYMBOL(ttm_range_man_init); int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, type); struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(bdev, man); + ret = ttm_resource_manager_force_list_clean(bdev, man); if (ret) return ret; @@ -163,14 +163,14 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(bdev, type, NULL); kfree(rman); return 0; } EXPORT_SYMBOL(ttm_range_man_fini); -static void ttm_range_man_debug(struct ttm_mem_type_manager *man, +static void ttm_range_man_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct ttm_range_manager *rman = to_range_manager(man); @@ -180,7 +180,7 @@ static void ttm_range_man_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -static const struct ttm_mem_type_manager_func ttm_range_manager_func = { +static const struct ttm_resource_manager_func ttm_range_manager_func = { .get_node = ttm_range_man_get_node, .put_node = ttm_range_man_put_node, .debug = ttm_range_man_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a7b3c8ee7f21..a68ae0204bf5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -630,7 +630,7 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, false, dev_priv->vram_size >> PAGE_SHIFT); #endif - 
ttm_mem_type_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); + ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; } @@ -1189,12 +1189,12 @@ static void vmw_master_drop(struct drm_device *dev, */ static void __vmw_svga_enable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (!ttm_mem_type_manager_used(man)) { + if (!ttm_resource_manager_used(man)) { vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } spin_unlock(&dev_priv->svga_lock); } @@ -1220,11 +1220,11 @@ void vmw_svga_enable(struct vmw_private *dev_priv) */ static void __vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); @@ -1241,7 +1241,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) */ void vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); /* * Disabling SVGA will turn off device modesetting capabilities, so * notify KMS about that so that it doesn't cache atomic state that @@ -1257,8 +1257,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv) vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) DRM_ERROR("Failed evicting VRAM buffers.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index ca5037184814..c8fe6e9cf092 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -37,7 +37,7 @@ #include struct vmwgfx_gmrid_man { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; spinlock_t lock; struct ida gmr_ida; uint32_t max_gmr_ids; @@ -45,12 +45,12 @@ struct vmwgfx_gmrid_man { uint32_t used_gmr_pages; }; -static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_mem_type_manager *man) +static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmwgfx_gmrid_man, manager); } -static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, +static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -84,7 +84,7 @@ nospace: return -ENOSPC; } -static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, +static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = 
to_gmrid_manager(man); @@ -98,11 +98,11 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; +static const struct ttm_resource_manager_func vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); @@ -116,7 +116,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); @@ -134,20 +134,20 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) BUG(); } ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type); struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, type, NULL); ida_destroy(&gman->gmr_ida); @@ -155,7 +155,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { +static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .get_node = vmw_gmrid_man_get_node, .put_node = vmw_gmrid_man_put_node, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 4110e8309188..6cac7b091205 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -16,12 +16,12 @@ * @lock: Manager lock. 
*/ struct vmw_thp_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static struct vmw_thp_manager *to_thp_manager(struct ttm_mem_type_manager *man) +static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmw_thp_manager, manager); } @@ -44,7 +44,7 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, return -ENOSPC; } -static int vmw_thp_get_node(struct ttm_mem_type_manager *man, +static int vmw_thp_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -106,7 +106,7 @@ found_unlock: -static void vmw_thp_put_node(struct ttm_mem_type_manager *man, +static void vmw_thp_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -123,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_mem_type_manager *man, int vmw_thp_init(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmw_thp_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -134,39 +134,39 @@ int vmw_thp_init(struct vmw_private *dev_priv) man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, dev_priv->vram_size >> PAGE_SHIFT); drm_mm_init(&rman->mm, 0, man->size); spin_lock_init(&rman->lock); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_thp_fini(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); struct vmw_thp_manager *rman = to_thp_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); if (ret) return; spin_lock(&rman->lock); drm_mm_clean(mm); drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL); kfree(rman); } -static void vmw_thp_debug(struct ttm_mem_type_manager *man, +static void vmw_thp_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -176,7 +176,7 @@ static void vmw_thp_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -const struct ttm_mem_type_manager_func vmw_thp_func = { +const struct ttm_resource_manager_func vmw_thp_func = { .get_node = vmw_thp_get_node, .put_node = vmw_thp_put_node, .debug = vmw_thp_debug diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 95d6c648d5c6..7b0655bc13da 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -54,7 +54,7 @@ struct ttm_place; struct ttm_lru_bulk_move; -struct ttm_mem_type_manager; +struct ttm_resource_manager; /** * struct ttm_bus_placement @@ -534,14 +534,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, struct ttm_buffer_object **p_bo); /** - * ttm_mem_type_manager_init + * ttm_resource_manager_init * * @man: memory 
manager object to init * @p_size: size managed area in pages. * * Initialise core parts of a manager object. */ -void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, +void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 31264a09ec63..d17e25ba80d4 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -45,11 +45,11 @@ #define TTM_MAX_BO_PRIORITY 4U -struct ttm_mem_type_manager; +struct ttm_resource_manager; -struct ttm_mem_type_manager_func { +struct ttm_resource_manager_func { /** - * struct ttm_mem_type_manager member get_node + * struct ttm_resource_manager member get_node * * @man: Pointer to a memory type manager. * @bo: Pointer to the buffer object we're allocating space for. @@ -69,20 +69,20 @@ struct ttm_mem_type_manager_func { * the function should return a negative error code. * * Note that @mem::mm_node will only be dereferenced by - * struct ttm_mem_type_manager functions and optionally by the driver, + * struct ttm_resource_manager functions and optionally by the driver, * which has knowledge of the underlying type. * * This function may not be called from within atomic context, so * an implementation can and must use either a mutex or a spinlock to * protect any data structures managing the space. */ - int (*get_node)(struct ttm_mem_type_manager *man, + int (*get_node)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem); /** - * struct ttm_mem_type_manager member put_node + * struct ttm_resource_manager member put_node * * @man: Pointer to a memory type manager. * @mem: Pointer to a struct ttm_mem_reg to be filled in. @@ -91,11 +91,11 @@ struct ttm_mem_type_manager_func { * and that are identified by @mem::mm_node and @mem::start. May not * be called from within atomic context. */ - void (*put_node)(struct ttm_mem_type_manager *man, + void (*put_node)(struct ttm_resource_manager *man, struct ttm_mem_reg *mem); /** - * struct ttm_mem_type_manager member debug + * struct ttm_resource_manager member debug * * @man: Pointer to a memory type manager. * @printer: Prefix to be used in printout to identify the caller. @@ -104,12 +104,12 @@ struct ttm_mem_type_manager_func { * type manager to aid debugging of out-of-memory conditions. * It may not be called from within atomic context. */ - void (*debug)(struct ttm_mem_type_manager *man, + void (*debug)(struct ttm_resource_manager *man, struct drm_printer *printer); }; /** - * struct ttm_mem_type_manager + * struct ttm_resource_manager * * @use_type: The memory type is enabled. * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory @@ -136,7 +136,7 @@ struct ttm_mem_type_manager_func { -struct ttm_mem_type_manager { +struct ttm_resource_manager { /* * No protection. Constant from start. */ @@ -145,7 +145,7 @@ struct ttm_mem_type_manager { uint64_t size; uint32_t available_caching; uint32_t default_caching; - const struct ttm_mem_type_manager_func *func; + const struct ttm_resource_manager_func *func; struct mutex io_reserve_mutex; bool use_io_reserve_lru; spinlock_t move_lock; @@ -390,7 +390,7 @@ extern struct ttm_bo_global { * struct ttm_bo_device - Buffer object driver device-specific data. * * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. - * @man: An array of mem_type_managers. + * @man: An array of resource_managers. 
* @vma_manager: Address space manager (pointer) * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. @@ -411,8 +411,8 @@ struct ttm_bo_device { /* * access via ttm_manager_type. */ - struct ttm_mem_type_manager sysman; - struct ttm_mem_type_manager *man_drv[TTM_NUM_MEM_TYPES]; + struct ttm_resource_manager sysman; + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. */ @@ -440,7 +440,7 @@ struct ttm_bo_device { bool no_retry; }; -static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, +static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev, int mem_type) { return bdev->man_drv[mem_type]; @@ -448,7 +448,7 @@ static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, int type, - struct ttm_mem_type_manager *manager) + struct ttm_resource_manager *manager) { bdev->man_drv[type] = manager; } @@ -581,8 +581,8 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_resource_manager *man); /** * ttm_bo_reserve: @@ -676,7 +676,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) } /** - * ttm_mem_type_manager_set_used + * ttm_resource_manager_set_used * * @man: A memory manager object. * @used: usage state to set. @@ -684,13 +684,13 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) * Set the manager in use flag. If disabled the manager is no longer * used for object placement. */ -static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *man, bool used) +static inline void ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) { man->use_type = used; } /** - * ttm_mem_type_manager_used + * ttm_resource_manager_used * * @man: Manager to get used state for * @@ -698,26 +698,26 @@ static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *ma * Returns: * true is used, false if not. */ -static inline bool ttm_mem_type_manager_used(struct ttm_mem_type_manager *man) +static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) { return man->use_type; } /** - * ttm_mem_type_manager_cleanup + * ttm_resource_manager_cleanup * * @man: A memory manager object. * * Cleanup the move fences from the memory manager object. */ -static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man) +static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man) { dma_fence_put(man->move); man->move = NULL; } /* - * ttm_mem_type_manager_force_list_clean + * ttm_resource_manager_force_list_clean * * @bdev - device to use * @man - manager to use @@ -725,8 +725,8 @@ static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man * Force all the objects out of a memory manager until clean. * Part of memory manager cleanup sequence. 
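The set_used/force_list_clean/cleanup helpers pair up into a bring-up and tear-down sequence; the vmw_thp_init()/vmw_thp_fini() hunks earlier in this patch follow it directly. A condensed, hypothetical version of that ordering (foo_* names are placeholders, sizes are in pages):

        int foo_manager_init(struct ttm_bo_device *bdev, unsigned long p_size)
        {
                struct foo_manager *fman;
                struct ttm_resource_manager *man;

                fman = kzalloc(sizeof(*fman), GFP_KERNEL);
                if (!fman)
                        return -ENOMEM;

                man = &fman->manager;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->func = &foo_manager_func;

                ttm_resource_manager_init(man, p_size);
                drm_mm_init(&fman->mm, 0, p_size);
                spin_lock_init(&fman->lock);

                /* publish the manager, then open it up for placements */
                ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
                ttm_resource_manager_set_used(man, true);
                return 0;
        }

        void foo_manager_fini(struct ttm_bo_device *bdev)
        {
                struct ttm_resource_manager *man = ttm_manager_type(bdev, TTM_PL_VRAM);
                struct foo_manager *fman = to_foo_manager(man);

                /* stop new placements first, then push out whatever is resident */
                ttm_resource_manager_set_used(man, false);
                if (ttm_resource_manager_force_list_clean(bdev, man))
                        return;

                drm_mm_takedown(&fman->mm);
                ttm_resource_manager_cleanup(man);      /* drops the move fence */
                ttm_set_driver_manager(bdev, TTM_PL_VRAM, NULL);
                kfree(fman);
        }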
*/ -int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man); +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man); /* * ttm_bo_util.c @@ -875,12 +875,12 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type); /** - * ttm_mem_type_manager_debug + * ttm_resource_manager_debug * * @man: manager type to dump. * @p: printer to use for debug. */ -void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, +void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); #endif -- cgit v1.2.3-59-g8ed1b From 2966141ad2dda23d1b37997de6a4389b7864c169 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:32 +1000 Subject: drm/ttm: rename ttm_mem_reg to ttm_resource. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This name better reflects what the object does. I didn't rename all the pointers it seemed too messy. Signed-off-by: Dave Airlie Acked-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-60-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 46 ++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 12 +++---- drivers/gpu/drm/drm_gem_vram_helper.c | 6 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 46 ++++++++++++------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 8 ++--- drivers/gpu/drm/nouveau/nouveau_mem.h | 10 +++--- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 8 ++--- drivers/gpu/drm/nouveau/nv17_fence.c | 2 +- drivers/gpu/drm/nouveau/nv50_fence.c | 2 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_ttm.c | 14 ++++---- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 2 +- drivers/gpu/drm/radeon/radeon_object.h | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 28 ++++++++-------- drivers/gpu/drm/radeon/radeon_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_agp_backend.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 26 +++++++-------- drivers/gpu/drm/ttm/ttm_bo_util.c | 46 ++++++++++++------------- drivers/gpu/drm/ttm/ttm_range_manager.c | 4 +-- drivers/gpu/drm/ttm/ttm_tt.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 6 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 8 ++--- include/drm/ttm/ttm_bo_api.h | 10 +++--- include/drm/ttm/ttm_bo_driver.h | 48 +++++++++++++-------------- include/drm/ttm/ttm_tt.h | 10 +++--- 37 files changed, 199 insertions(+), 199 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index b9050b7221d5..c847a5fe94c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -171,7 +171,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) * * Check if a mem object has already address space allocated. 
*/ -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem) { return mem->mm_node != NULL; } @@ -189,7 +189,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node; @@ -252,7 +252,7 @@ err_out: * Free the allocated GTT again. */ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node = mem->mm_node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ce98df5b0c21..43f4966331dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1268,11 +1268,11 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, */ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; if (!amdgpu_bo_is_amdgpu_bo(bo)) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index e01e8903741e..5ddb6cf96030 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -283,7 +283,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, uint64_t *flags); void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2fc0214d9a95..28557839f132 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -182,9 +182,9 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) * Assign the memory from new_mem to the memory of the buffer object bo. */ static void amdgpu_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -201,7 +201,7 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, */ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t addr = 0; @@ -221,7 +221,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, * @offset: The offset that drm_mm_node is used for finding. * */ -static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, +static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem, uint64_t *offset) { struct drm_mm_node *mm_node = mem->mm_node; @@ -249,7 +249,7 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, * the physical address for local memory. 
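As the amdgpu_bo_move_notify() hunk above shows, driver notification hooks now take a struct ttm_resource. A stripped-down, hypothetical hook illustrating the usual shape; foo_bo() and foo_bo_unmap() stand in for driver helpers and only the TTM types and fields are real:

        static void foo_bo_move_notify(struct ttm_buffer_object *bo,
                                       bool evict,
                                       struct ttm_resource *new_mem)
        {
                struct ttm_resource *old_mem = &bo->mem;        /* still the old placement here */

                if (!new_mem)
                        return;         /* bo is being destroyed, nothing to track */

                if (old_mem->mem_type == TTM_PL_VRAM &&
                    new_mem->mem_type != TTM_PL_VRAM) {
                        /* leaving VRAM: drop any CPU mapping the driver cached */
                        foo_bo_unmap(foo_bo(bo));
                }
        }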
*/ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct drm_mm_node *mm_node, unsigned num_pages, uint64_t offset, unsigned window, struct amdgpu_ring *ring, @@ -473,8 +473,8 @@ error: */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); @@ -533,10 +533,10 @@ error: */ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; @@ -589,10 +589,10 @@ out_cleanup: */ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; @@ -635,7 +635,7 @@ out_cleanup: * Called by amdgpu_bo_move() */ static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct drm_mm_node *nodes = mem->mm_node; @@ -645,7 +645,7 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, if (mem->mem_type != TTM_PL_VRAM) return false; - /* ttm_mem_reg_ioremap only supports contiguous memory */ + /* ttm_resource_ioremap only supports contiguous memory */ if (nodes->size != mem->num_pages) return false; @@ -660,11 +660,11 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int r; /* Can't move a pinned BO */ @@ -746,7 +746,7 @@ memcpy: * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct drm_mm_node *mm_node = mem->mm_node; @@ -770,7 +770,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ return -EINVAL; /* Only physically contiguous buffers apply. In a contiguous * buffer, size of the first mm_node would match the number of - * pages in ttm_mem_reg. + * pages in ttm_resource. */ if (adev->mman.aper_base_kaddr && (mm_node->size == mem->num_pages)) @@ -1115,7 +1115,7 @@ gart_bind_fail: * This handles binding GTT memory to the device address space. 
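The amdgpu_move_vram_ram() hunk above keeps the established two-step shape, only with ttm_resource spelled out. Condensed to its skeleton, with foo_copy() standing in for the driver blit path and caching setup and error messages omitted, the pattern is roughly:

        static int foo_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_resource *new_mem)
        {
                struct ttm_resource *old_mem = &bo->mem;
                struct ttm_resource tmp_mem;
                struct ttm_placement placement;
                struct ttm_place placements;
                int r;

                tmp_mem = *new_mem;
                tmp_mem.mm_node = NULL;

                placement.num_placement = 1;
                placement.placement = &placements;
                placement.num_busy_placement = 1;
                placement.busy_placement = &placements;
                placements.fpfn = 0;
                placements.lpfn = 0;
                placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;

                /* grab a temporary GTT resource to blit into */
                r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
                if (r)
                        return r;

                /* bind the pages, copy VRAM -> GTT, then let TTM finish the move */
                r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
                if (!r)
                        r = foo_copy(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
                if (!r)
                        r = ttm_bo_move_ttm(bo, ctx, new_mem);

                ttm_bo_mem_put(bo, &tmp_mem);
                return r;
        }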
*/ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; @@ -1166,7 +1166,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; - struct ttm_mem_reg tmp; + struct ttm_resource tmp; struct ttm_placement placement; struct ttm_place placements; uint64_t addr, flags; @@ -1507,7 +1507,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) * * Figure out the flags to use for a VM PDE (Page Directory Entry). */ -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) { uint64_t flags = 0; @@ -1533,7 +1533,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) * Figure out the flags to use for a VM PTE (Page Table Entry). */ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 3db29ae1f802..36b024fd077e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -63,7 +63,7 @@ struct amdgpu_mman { struct amdgpu_copy_mem { struct ttm_buffer_object *bo; - struct ttm_mem_reg *mem; + struct ttm_resource *mem; unsigned long offset; }; @@ -72,13 +72,13 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev); int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct device *dev, enum dma_data_direction dir, struct sg_table **sgt); @@ -142,9 +142,9 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem); +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7417754e9141..920a0553e172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1765,7 +1765,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - struct ttm_mem_reg *mem; + struct ttm_resource *mem; struct drm_mm_node *nodes; struct dma_fence **last_update; struct dma_resv *resv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 6f888a63f22d..895634cbf999 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -272,7 +272,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_mem_reg *mem = &bo->tbo.mem; + struct ttm_resource *mem = &bo->tbo.mem; struct drm_mm_node *nodes = mem->mm_node; unsigned pages = mem->num_pages; u64 usage; @@ -292,13 +292,13 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) /** * amdgpu_vram_mgr_virt_start - update virtual start address * - * @mem: ttm_mem_reg to update + * @mem: ttm_resource to update * @node: just allocated node * * Calculate a virtual BO start address to easily check if everything is CPU * accessible. */ -static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, +static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, struct drm_mm_node *node) { unsigned long start; @@ -324,7 +324,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = mgr->adev; @@ -442,7 +442,7 @@ error: * Free the allocated VRAM again. */ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = mgr->adev; @@ -482,7 +482,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, * Allocate and fill a sg table from a VRAM allocation. 
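VRAM allocations made by this manager can be split across several drm_mm nodes, stored as an array behind mem->mm_node; consumers step through them by byte offset. A condensed form of that lookup, following the amdgpu_find_mm_node() pattern shown earlier, with a hypothetical foo_ name:

        static struct drm_mm_node *foo_find_mm_node(struct ttm_resource *mem,
                                                    uint64_t *offset)
        {
                struct drm_mm_node *mm_node = mem->mm_node;

                /* skip whole nodes until the offset falls inside one */
                while (*offset >= (mm_node->size << PAGE_SHIFT)) {
                        *offset -= (mm_node->size << PAGE_SHIFT);
                        ++mm_node;
                }
                return mm_node;
        }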
*/ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct device *dev, enum dma_data_direction dir, struct sg_table **sgt) diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index e3660d00987d..b410930d94a0 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -653,7 +653,7 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_kmap_obj *kmap = &gbo->kmap; @@ -1020,7 +1020,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo, static void bo_driver_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct drm_gem_vram_object *gbo; @@ -1034,7 +1034,7 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo, } static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 51416086e2f4..604a74323696 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -679,7 +679,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 10); @@ -711,7 +711,7 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -749,7 +749,7 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -788,7 +788,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -826,7 +826,7 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 7); @@ -844,7 +844,7 @@ nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object 
*bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 7); @@ -878,7 +878,7 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 length = (new_reg->num_pages << PAGE_SHIFT); @@ -965,7 +965,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) static inline uint32_t nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, - struct nouveau_channel *chan, struct ttm_mem_reg *reg) + struct nouveau_channel *chan, struct ttm_resource *reg) { if (reg->mem_type == TTM_PL_TT) return NvDmaTT; @@ -974,7 +974,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, static int nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { u32 src_offset = old_reg->start << PAGE_SHIFT; u32 dst_offset = new_reg->start << PAGE_SHIFT; @@ -1020,7 +1020,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); struct nouveau_mem *new_mem = nouveau_mem(reg); @@ -1052,7 +1052,7 @@ done: static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool no_wait_gpu, struct ttm_resource *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = drm->ttm.chan; @@ -1062,7 +1062,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, /* create temporary vmas for the transfer and attach them to the * old nvkm_mem node, these will get cleaned up after ttm has - * destroyed the ttm_mem_reg + * destroyed the ttm_resource */ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_move_prep(drm, bo, new_reg); @@ -1098,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) s32 oclass; int (*exec)(struct nouveau_channel *, struct ttm_buffer_object *, - struct ttm_mem_reg *, struct ttm_mem_reg *); + struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, @@ -1160,7 +1160,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool no_wait_gpu, struct ttm_resource *new_reg) { struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { @@ -1169,7 +1169,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_reg; + struct ttm_resource tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; @@ -1197,7 +1197,7 @@ out: static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool 
no_wait_gpu, struct ttm_resource *new_reg) { struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { @@ -1206,7 +1206,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_reg; + struct ttm_resource tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; @@ -1233,7 +1233,7 @@ out: static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_reg) + struct ttm_resource *new_reg) { struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1265,7 +1265,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, } static int -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, struct nouveau_drm_tile **new_tile) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -1301,11 +1301,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_reg) + struct ttm_resource *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); - struct ttm_mem_reg *old_reg = &bo->mem; + struct ttm_resource *old_reg = &bo->mem; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; @@ -1374,7 +1374,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static int -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); @@ -1454,7 +1454,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) } static void -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nouveau_mem *mem = nouveau_mem(reg); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2a6519737800..b4314c01e313 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -155,7 +155,7 @@ struct nouveau_drm { atomic_t validate_sequence; int (*move)(struct nouveau_channel *, struct ttm_buffer_object *, - struct ttm_mem_reg *, struct ttm_mem_reg *); + struct ttm_resource *, struct ttm_resource *); struct nouveau_channel *chan; struct nvif_object copy; int mtrr; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c002f8968507..9559f925bb53 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem) } int -nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt) +nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) { struct nouveau_mem *mem = nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -130,7 +130,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt) } int -nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page) +nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) { struct nouveau_mem *mem = 
nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -173,7 +173,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page) } void -nouveau_mem_del(struct ttm_mem_reg *reg) +nouveau_mem_del(struct ttm_resource *reg) { struct nouveau_mem *mem = nouveau_mem(reg); nouveau_mem_fini(mem); @@ -183,7 +183,7 @@ nouveau_mem_del(struct ttm_mem_reg *reg) int nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_mem *mem; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h index f6d039e73812..3fe1cfed57a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.h +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h @@ -7,7 +7,7 @@ struct ttm_dma_tt; #include static inline struct nouveau_mem * -nouveau_mem(struct ttm_mem_reg *reg) +nouveau_mem(struct ttm_resource *reg) { return reg->mm_node; } @@ -21,10 +21,10 @@ struct nouveau_mem { }; int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp, - struct ttm_mem_reg *); -void nouveau_mem_del(struct ttm_mem_reg *); -int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page); -int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *); + struct ttm_resource *); +void nouveau_mem_del(struct ttm_resource *); +int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page); +int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *); void nouveau_mem_fini(struct nouveau_mem *); int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index feaac908efed..1ec97f5c3cf5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -26,7 +26,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm) } static int -nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) +nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_mem *mem = nouveau_mem(reg); @@ -61,7 +61,7 @@ static struct ttm_backend_func nv04_sgdma_backend = { }; static int -nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) +nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_mem *mem = nouveau_mem(reg); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 78b5a87b9855..e6a30865a00b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -32,7 +32,7 @@ #include static void -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg) +nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg) { nouveau_mem_del(reg); } @@ -41,7 +41,7 @@ static int nouveau_vram_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -72,7 +72,7 @@ static int nouveau_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -95,7 +95,7 @@ static int nv04_gart_manager_new(struct ttm_resource_manager *man, struct 
ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 5d613d43b84d..5121124267ff 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) { struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; - struct ttm_mem_reg *reg = &priv->bo->bo.mem; + struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; u32 limit = start + reg->size - 1; int ret = 0; diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index a00ecc3de053..d7288691a874 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan) { struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; - struct ttm_mem_reg *reg = &priv->bo->bo.mem; + struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; u32 limit = start + reg->size - 1; int ret; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 9691449aefdb..aae90a9ee1db 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -350,7 +350,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp, int qxl_ttm_init(struct qxl_device *qdev); void qxl_ttm_fini(struct qxl_device *qdev); int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /* qxl image */ diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 727049046014..b1ea984f143a 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -71,7 +71,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, } int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct qxl_device *qdev = qxl_get_qdev(bdev); @@ -111,7 +111,7 @@ struct qxl_ttm_tt { }; static int qxl_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct qxl_ttm_tt *gtt = (void *)ttm; @@ -164,9 +164,9 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, } static void qxl_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -175,9 +175,9 @@ static void qxl_move_null(struct ttm_buffer_object *bo, static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); @@ -193,7 +193,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, static void qxl_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct qxl_bo *qbo; struct qxl_device *qdev; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b7c3fb2bfb54..cc4f58d16589 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -2857,7 +2857,7 @@ int radeon_vm_clear_invalids(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, struct radeon_bo_va *bo_va, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index f3dee01250da..bb7582afd803 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -775,7 +775,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct radeon_bo *rbo; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 60275b822f79..44b47241ee42 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -165,7 +165,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 21a01737b1be..3355b69b13d1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -166,9 +166,9 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static void radeon_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -177,8 +177,8 @@ static void radeon_move_null(struct ttm_buffer_object *bo, static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; @@ -233,11 +233,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; @@ -278,11 +278,11 @@ out_cleanup: static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; @@ -315,11 +315,11 @@ out_cleanup: 
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct radeon_device *rdev; struct radeon_bo *rbo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int r; r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); @@ -376,7 +376,7 @@ memcpy: return 0; } -static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct radeon_device *rdev = radeon_get_rdev(bdev); @@ -544,7 +544,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) } static int radeon_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index f60fae0aed11..71e2c3785ab9 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -911,7 +911,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm, */ int radeon_vm_bo_update(struct radeon_device *rdev, struct radeon_bo_va *bo_va, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct radeon_vm *vm = bo_va->vm; struct radeon_ib ib; diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 6050dc846894..8f24663c3df3 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -48,7 +48,7 @@ struct ttm_agp_backend { struct agp_bridge_data *bridge; }; -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ff68f25ddbd4..ad09329b62d3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -145,7 +145,7 @@ static inline uint32_t ttm_bo_type_flags(unsigned type) } static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; @@ -268,7 +268,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, bool evict, + struct ttm_resource *mem, bool evict, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -642,7 +642,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; struct ttm_placement placement; int ret = 0; @@ -841,7 +841,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); @@ -852,7 +852,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, return man->func->get_node(man, bo, place, mem); } -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct 
ttm_mem_reg *mem) +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); @@ -870,7 +870,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_resource_manager *man, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, bool no_wait_gpu) { struct dma_fence *fence; @@ -905,7 +905,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -986,7 +986,7 @@ static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, */ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -1034,7 +1034,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -1112,7 +1112,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { int ret = 0; - struct ttm_mem_reg mem; + struct ttm_resource mem; dma_resv_assert_held(bo->base.resv); @@ -1138,7 +1138,7 @@ out_unlock: static bool ttm_bo_places_compat(const struct ttm_place *places, unsigned num_placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { unsigned i; @@ -1161,7 +1161,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, } bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { if (ttm_bo_places_compat(placement->placement, placement->num_placement, @@ -1730,7 +1730,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm->caching_state != tt_cached) { struct ttm_operation_ctx ctx = { false, false }; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 8ef0de8e36c5..496158acd5b9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -52,10 +52,10 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { @@ -127,7 +127,7 @@ static int ttm_mem_io_evict(struct ttm_resource_manager *man) } int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; @@ -149,7 +149,7 @@ retry: } void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { if (--mem->bus.io_reserved_count) return; @@ -163,7 +163,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct 
ttm_buffer_object *bo) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; int ret; if (mem->bus.io_reserved_vm) @@ -181,7 +181,7 @@ int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; if (!mem->bus.io_reserved_vm) return; @@ -191,8 +191,8 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) ttm_mem_io_free(bo->bdev, mem); } -static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static int ttm_resource_ioremap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void **virtual) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); @@ -226,8 +226,8 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, return 0; } -static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static void ttm_resource_iounmap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void *virtual) { struct ttm_resource_manager *man; @@ -300,13 +300,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy = *old_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -319,10 +319,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); + ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap); if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); + ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap); if (ret) goto out; @@ -390,9 +390,9 @@ out2: } out1: - ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); + ttm_resource_iounmap(bdev, old_mem, new_iomap); out: - ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + ttm_resource_iounmap(bdev, &old_copy, old_iomap); /* * On error, keep the mm node! 
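ttm_bo_move_memcpy() keeps its role as the CPU fallback after the rename; the amdgpu and radeon move callbacks in this series fall through to it when no copy engine is available. A schematic driver ->move() showing that flow, with foo_move_blit() as a hypothetical stand-in for the hardware path:

        static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
                               struct ttm_operation_ctx *ctx,
                               struct ttm_resource *new_mem)
        {
                struct ttm_resource *old_mem = &bo->mem;
                int r;

                r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
                if (r)
                        return r;

                /* nothing backs the old placement: just adopt the new one */
                if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                        *old_mem = *new_mem;
                        new_mem->mm_node = NULL;
                        return 0;
                }

                r = foo_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
                if (r == 0)
                        return 0;

                /* no copy engine available: CPU copy through ioremapped apertures */
                return ttm_bo_move_memcpy(bo, ctx, new_mem);
        }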
@@ -502,7 +502,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long size, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; if (bo->mem.bus.addr) { map->bo_kmap_type = ttm_bo_map_premapped; @@ -526,7 +526,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false @@ -631,11 +631,11 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -692,10 +692,10 @@ EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index df62177cd913..274a05ca13d3 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -57,7 +57,7 @@ static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_man static int ttm_range_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; @@ -96,7 +96,7 @@ static int ttm_range_man_get_node(struct ttm_resource_manager *man, } static void ttm_range_man_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e25d4097aa16..bdd6169cef13 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -322,7 +322,7 @@ void ttm_tt_unbind(struct ttm_tt *ttm) } } -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, struct ttm_operation_ctx *ctx) { int ret = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 1e59c019affa..3229451d0706 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -1135,14 +1135,14 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo) * vmw_bo_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. - * @mem: The struct ttm_mem_reg indicating to what memory + * @mem: The struct ttm_resource indicating to what memory * region the move is taking place. * * Detaches cached maps and device bindings that require that the * buffer doesn't move. 
*/ void vmw_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_buffer_object *vbo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index aa763c6b1146..871ad738dadb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -793,7 +793,7 @@ extern void vmw_resource_unreserve(struct vmw_resource *res, struct vmw_buffer_object *new_backup, unsigned long new_backup_offset); extern void vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); extern void vmw_resource_evict_all(struct vmw_private *dev_priv); extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); @@ -878,7 +878,7 @@ extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); extern void vmw_bo_unmap(struct vmw_buffer_object *vbo); extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); extern struct vmw_buffer_object * vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c8fe6e9cf092..3fea7a6c7cfa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -53,7 +53,7 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); int id; @@ -85,7 +85,7 @@ nospace: } static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c8441030637a..c0f156078dda 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -855,7 +855,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) * states from the device. 
*/ void vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_buffer_object *dx_query_mob; struct ttm_bo_device *bdev = bo->bdev; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 6cac7b091205..f594e2e6ab7e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -29,7 +29,7 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, unsigned long align_pages, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, unsigned long lpfn, enum drm_mm_insert_mode mode) { @@ -47,7 +47,7 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, static int vmw_thp_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); struct drm_mm *mm = &rman->mm; @@ -107,7 +107,7 @@ found_unlock: static void vmw_thp_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 01c81e89ed7a..69e7e7fe2a4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -539,7 +539,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) } -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); @@ -713,7 +713,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) return vmw_user_bo_verify_access(bo, tfile); } -static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); @@ -743,7 +743,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg * vmw_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. - * @mem: The struct ttm_mem_reg indicating to what memory + * @mem: The struct ttm_resource indicating to what memory * region the move is taking place. * * Calls move_notify for all subsystems needing it. @@ -751,7 +751,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg */ static void vmw_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { vmw_bo_move_notify(bo, mem); vmw_query_move_notify(bo, mem); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 7b0655bc13da..770ad2195875 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -81,7 +81,7 @@ struct ttm_bus_placement { /** - * struct ttm_mem_reg + * struct ttm_resource * * @mm_node: Memory manager node. * @size: Requested size of memory region. @@ -94,7 +94,7 @@ struct ttm_bus_placement { * buffer object. 
*/ -struct ttm_mem_reg { +struct ttm_resource { void *mm_node; unsigned long start; unsigned long size; @@ -187,7 +187,7 @@ struct ttm_buffer_object { * Members protected by the bo::resv::reserved lock. */ - struct ttm_mem_reg mem; + struct ttm_resource mem; struct file *persistent_swap_storage; struct ttm_tt *ttm; bool evicted; @@ -316,12 +316,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo * * @placement: Return immediately if buffer is busy. - * @mem: The struct ttm_mem_reg indicating the region where the bo resides + * @mem: The struct ttm_resource indicating the region where the bo resides * @new_flags: Describes compatible placement found * * Returns true if the placement is compatible */ -bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem, uint32_t *new_flags); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index d17e25ba80d4..eb1c3312e175 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -55,7 +55,7 @@ struct ttm_resource_manager_func { * @bo: Pointer to the buffer object we're allocating space for. * @placement: Placement details. * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * @mem: Pointer to a struct ttm_resource to be filled in. * * This function should allocate space in the memory type managed * by @man. Placement details if @@ -79,20 +79,20 @@ struct ttm_resource_manager_func { int (*get_node)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * struct ttm_resource_manager member put_node * * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * @mem: Pointer to a struct ttm_resource to be filled in. * * This function frees memory type resources previously allocated * and that are identified by @mem::mm_node and @mem::start. May not * be called from within atomic context. */ void (*put_node)(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * struct ttm_resource_manager member debug @@ -251,7 +251,7 @@ struct ttm_bo_driver { */ int (*move)(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * struct ttm_bo_driver_member verify_access @@ -277,7 +277,7 @@ struct ttm_bo_driver { */ void (*move_notify)(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /* notify the driver we are taking a fault on this BO * and have reserved it */ int (*fault_reserve_notify)(struct ttm_buffer_object *bo); @@ -294,9 +294,9 @@ struct ttm_bo_driver { * are balanced. */ int (*io_mem_reserve)(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * Return the pfn for a given page_offset inside the BO. @@ -503,15 +503,15 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) */ /** - * ttm_mem_reg_is_pci + * ttm_resource_is_pci * * @bdev: Pointer to a struct ttm_bo_device. - * @mem: A valid struct ttm_mem_reg. + * @mem: A valid struct ttm_resource. 
* * Returns true if the memory described by @mem is PCI memory, * false otherwise. */ -bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); +bool ttm_resource_is_pci(struct ttm_bo_device *bdev, struct ttm_resource *mem); /** * ttm_bo_mem_space @@ -519,7 +519,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); * @bo: Pointer to a struct ttm_buffer_object. the data of which * we want to allocate space for. * @proposed_placement: Proposed new placement for the buffer object. - * @mem: A struct ttm_mem_reg. + * @mem: A struct ttm_resource. * @interruptible: Sleep interruptible when sliping. * @no_wait_gpu: Return immediately if the GPU is busy. * @@ -534,10 +534,10 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem); int ttm_bo_device_release(struct ttm_bo_device *bdev); @@ -733,16 +733,16 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, */ int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * ttm_bo_move_ttm * * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Optimized move function for a buffer object with both old and * new placement backed by a TTM. The function will, if successful, @@ -756,7 +756,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_move_memcpy @@ -764,7 +764,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Fallback move function for a mappable buffer object in mappable memory. * The function will, if successful, @@ -778,7 +778,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_free_old_node @@ -795,7 +795,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Accelerated move function to be called when an accelerated move * has been scheduled. 
The function will create a new temporary buffer object @@ -806,7 +806,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); */ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_move. @@ -814,14 +814,14 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Function for pipelining accelerated moves. Either free the memory * immediately or hang it on a temporary buffer object. */ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_gutting. diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index c0e928abf592..2ac34219ecb5 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -30,7 +30,7 @@ #include struct ttm_tt; -struct ttm_mem_reg; +struct ttm_resource; struct ttm_buffer_object; struct ttm_operation_ctx; @@ -53,14 +53,14 @@ struct ttm_backend_func { * struct ttm_backend_func member bind * * @ttm: Pointer to a struct ttm_tt. - * @bo_mem: Pointer to a struct ttm_mem_reg describing the + * @bo_mem: Pointer to a struct ttm_resource describing the * memory type and location for binding. * * Bind the backend pages into the aperture in the location * indicated by @bo_mem. This function should be able to handle * differences between aperture and system page sizes. */ - int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); + int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem); /** * struct ttm_backend_func member unbind @@ -179,11 +179,11 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); * ttm_ttm_bind: * * @ttm: The struct ttm_tt containing backing pages. - * @bo_mem: The struct ttm_mem_reg identifying the binding location. + * @bo_mem: The struct ttm_resource identifying the binding location. * * Bind the pages of @ttm to an aperture location identified by @bo_mem */ -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, struct ttm_operation_ctx *ctx); /** -- cgit v1.2.3-59-g8ed1b From b2458726b38cb69f3da3677dbdf53e47af0e8792 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 3 Aug 2020 16:25:15 +0200 Subject: drm/ttm: give resource functions their own [ch] files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a separate object we work within TTM. 
Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/384338/?series=80346&rev=1 --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +- drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_bo.c | 124 ++------------ drivers/gpu/drm/ttm/ttm_bo_util.c | 4 +- drivers/gpu/drm/ttm/ttm_resource.c | 151 +++++++++++++++++ include/drm/ttm/ttm_bo_api.h | 70 +------- include/drm/ttm/ttm_bo_driver.h | 189 --------------------- include/drm/ttm/ttm_resource.h | 263 +++++++++++++++++++++++++++++ 11 files changed, 446 insertions(+), 376 deletions(-) create mode 100644 drivers/gpu/drm/ttm/ttm_resource.c create mode 100644 include/drm/ttm/ttm_resource.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 43f4966331dd..b36d94f57d42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -381,7 +381,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (cpu_addr) amdgpu_bo_kunmap(*bo_ptr); - ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 67d2eef2f9eb..462402fcd1a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -578,7 +578,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, /* move BO (in tmp_mem) to new_mem */ r = ttm_bo_move_ttm(bo, ctx, new_mem); out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } @@ -625,7 +625,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, goto out_cleanup; } out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } @@ -1203,11 +1203,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) gtt->offset = (u64)tmp.start << PAGE_SHIFT; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { - ttm_bo_mem_put(bo, &tmp); + ttm_resource_free(bo, &tmp); return r; } - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); bo->mem = tmp; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 604a74323696..29d7d7e100f7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1191,7 +1191,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, ret = ttm_bo_move_ttm(bo, &ctx, new_reg); out: - ttm_bo_mem_put(bo, &tmp_reg); + ttm_resource_free(bo, &tmp_reg); return ret; } @@ -1227,7 +1227,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; out: - ttm_bo_mem_put(bo, &tmp_reg); + ttm_resource_free(bo, &tmp_reg); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3355b69b13d1..31f4cf211b6a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -271,7 +271,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, } r = ttm_bo_move_ttm(bo, &ctx, new_mem); out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, 
&tmp_mem); return r; } @@ -309,7 +309,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, goto out_cleanup; } out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index e54326e6cea4..90c0da88cc98 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,7 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ + ttm_resource.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ae71c3ab6cc4..55890314316b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,19 +77,6 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -void ttm_resource_manager_debug(struct ttm_resource_manager *man, - struct drm_printer *p) -{ - drm_printf(p, " use_type: %d\n", man->use_type); - drm_printf(p, " use_tt: %d\n", man->use_tt); - drm_printf(p, " size: %llu\n", man->size); - drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); - drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); - if (man->func && man->func->debug) - (*man->func->debug)(man, p); -} -EXPORT_SYMBOL(ttm_resource_manager_debug); - static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -363,7 +350,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->ttm); bo->ttm = NULL; - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); } static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) @@ -678,7 +665,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); - ttm_bo_mem_put(bo, &evict_mem); + ttm_resource_free(bo, &evict_mem); goto out; } bo->evicted = true; @@ -767,11 +754,11 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, return r == -EDEADLK ? 
-EBUSY : r; } -static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man, - const struct ttm_place *place, - struct ttm_operation_ctx *ctx, - struct ww_acquire_ctx *ticket) +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket) { struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; bool locked = false; @@ -839,32 +826,6 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } -static int ttm_bo_mem_get(struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); - - mem->mm_node = NULL; - if (!man->func || !man->func->alloc) - return 0; - - return man->func->alloc(man, bo, place, mem); -} - -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); - - if (!man->func || !man->func->free) - return; - - man->func->free(man, mem); - mem->mm_node = NULL; - mem->mem_type = TTM_PL_SYSTEM; -} -EXPORT_SYMBOL(ttm_bo_mem_put); - /** * Add the last move fence to the BO and reserve a new shared slot. */ @@ -915,7 +876,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ticket = dma_resv_locking_ctx(bo->base.resv); do { - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (likely(!ret)) break; if (unlikely(ret != -ENOSPC)) @@ -1056,7 +1017,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, goto error; type_found = true; - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (ret == -ENOSPC) continue; if (unlikely(ret)) @@ -1065,7 +1026,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, man = ttm_manager_type(bdev, mem->mem_type); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); if (unlikely(ret)) { - ttm_bo_mem_put(bo, mem); + ttm_resource_free(bo, mem); if (ret == -EBUSY) continue; @@ -1132,7 +1093,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); out_unlock: if (ret) - ttm_bo_mem_put(bo, &mem); + ttm_resource_free(bo, &mem); return ret; } @@ -1404,52 +1365,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false, - .flags = TTM_OPT_FLAG_FORCE_ALLOC - }; - struct ttm_bo_global *glob = &ttm_bo_glob; - struct dma_fence *fence; - int ret; - unsigned i; - - /* - * Can't use standard list traversal since we're unlocking. 
- */ - - spin_lock(&glob->lru_lock); - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { - while (!list_empty(&man->lru[i])) { - spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, man, NULL, &ctx, - NULL); - if (ret) - return ret; - spin_lock(&glob->lru_lock); - } - } - spin_unlock(&glob->lru_lock); - - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (fence) { - ret = dma_fence_wait(fence, false); - dma_fence_put(fence); - if (ret) - return ret; - } - - return 0; -} -EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); - - int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); @@ -1468,23 +1383,6 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_resource_manager_init(struct ttm_resource_manager *man, - unsigned long p_size) -{ - unsigned i; - - man->use_io_reserve_lru = false; - mutex_init(&man->io_reserve_mutex); - spin_lock_init(&man->move_lock); - INIT_LIST_HEAD(&man->io_reserve_lru); - man->size = p_size; - - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - INIT_LIST_HEAD(&man->lru[i]); - man->move = NULL; -} -EXPORT_SYMBOL(ttm_resource_manager_init); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 496158acd5b9..12be2d3fcc81 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -47,7 +47,7 @@ struct ttm_transfer_obj { void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, @@ -398,7 +398,7 @@ out: * On error, keep the mm node! */ if (!ret) - ttm_bo_mem_put(bo, &old_copy); + ttm_resource_free(bo, &old_copy); return ret; } EXPORT_SYMBOL(ttm_bo_move_memcpy); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c new file mode 100644 index 000000000000..33b642532e5c --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -0,0 +1,151 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#include +#include + +int ttm_resource_alloc(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res) +{ + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, res->mem_type); + + res->mm_node = NULL; + if (!man->func || !man->func->alloc) + return 0; + + return man->func->alloc(man, bo, place, res); +} + +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res) +{ + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, res->mem_type); + + if (man->func && man->func->free) + man->func->free(man, res); + + res->mm_node = NULL; + res->mem_type = TTM_PL_SYSTEM; +} +EXPORT_SYMBOL(ttm_resource_free); + +/** + * ttm_resource_manager_init + * + * @man: memory manager object to init + * @p_size: size managed area in pages. + * + * Initialise core parts of a manager object. + */ +void ttm_resource_manager_init(struct ttm_resource_manager *man, + unsigned long p_size) +{ + unsigned i; + + man->use_io_reserve_lru = false; + mutex_init(&man->io_reserve_mutex); + spin_lock_init(&man->move_lock); + INIT_LIST_HEAD(&man->io_reserve_lru); + man->size = p_size; + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + INIT_LIST_HEAD(&man->lru[i]); + man->move = NULL; +} +EXPORT_SYMBOL(ttm_resource_manager_init); + +/* + * ttm_resource_manager_force_list_clean + * + * @bdev - device to use + * @man - manager to use + * + * Force all the objects out of a memory manager until clean. + * Part of memory manager cleanup sequence. + */ +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) +{ + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false, + .flags = TTM_OPT_FLAG_FORCE_ALLOC + }; + struct ttm_bo_global *glob = &ttm_bo_glob; + struct dma_fence *fence; + int ret; + unsigned i; + + /* + * Can't use standard list traversal since we're unlocking. + */ + + spin_lock(&glob->lru_lock); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + while (!list_empty(&man->lru[i])) { + spin_unlock(&glob->lru_lock); + ret = ttm_mem_evict_first(bdev, man, NULL, &ctx, + NULL); + if (ret) + return ret; + spin_lock(&glob->lru_lock); + } + } + spin_unlock(&glob->lru_lock); + + spin_lock(&man->move_lock); + fence = dma_fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); + +/** + * ttm_resource_manager_debug + * + * @man: manager type to dump. + * @p: printer to use for debug. 
+ */ +void ttm_resource_manager_debug(struct ttm_resource_manager *man, + struct drm_printer *p) +{ + drm_printf(p, " use_type: %d\n", man->use_type); + drm_printf(p, " use_tt: %d\n", man->use_tt); + drm_printf(p, " size: %llu\n", man->size); + drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); + drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); + if (man->func && man->func->debug) + (*man->func->debug)(man, p); +} +EXPORT_SYMBOL(ttm_resource_manager_debug); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 770ad2195875..dbb276abcf6a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -42,6 +42,8 @@ #include #include +#include "ttm_resource.h" + struct ttm_bo_global; struct ttm_bo_device; @@ -54,57 +56,6 @@ struct ttm_place; struct ttm_lru_bulk_move; -struct ttm_resource_manager; - -/** - * struct ttm_bus_placement - * - * @addr: mapped virtual address - * @base: bus base address - * @is_iomem: is this io memory ? - * @size: size in byte - * @offset: offset from the base address - * @io_reserved_vm: The VM system has a refcount in @io_reserved_count - * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve - * - * Structure indicating the bus placement of an object. - */ -struct ttm_bus_placement { - void *addr; - phys_addr_t base; - unsigned long size; - unsigned long offset; - bool is_iomem; - bool io_reserved_vm; - uint64_t io_reserved_count; -}; - - -/** - * struct ttm_resource - * - * @mm_node: Memory manager node. - * @size: Requested size of memory region. - * @num_pages: Actual size of memory region in pages. - * @page_alignment: Page alignment. - * @placement: Placement flags. - * @bus: Placement on io bus accessible to the CPU - * - * Structure indicating the placement and space resources used by a - * buffer object. - */ - -struct ttm_resource { - void *mm_node; - unsigned long start; - unsigned long size; - unsigned long num_pages; - uint32_t page_alignment; - uint32_t mem_type; - uint32_t placement; - struct ttm_bus_placement bus; -}; - /** * enum ttm_bo_type * @@ -533,17 +484,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_alignment, bool interruptible, struct ttm_buffer_object **p_bo); -/** - * ttm_resource_manager_init - * - * @man: memory manager object to init - * @p_size: size managed area in pages. - * - * Initialise core parts of a manager object. - */ -void ttm_resource_manager_init(struct ttm_resource_manager *man, - unsigned long p_size); - /** * ttm_bo_evict_mm * @@ -680,6 +620,12 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) return bo->base.dev != NULL; } +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket); + /* Default number of pre-faulted pages in the TTM fault handler */ #define TTM_BO_VM_NUM_PREFAULT 16 diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index bfdda61edadb..27b4a1e92875 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -43,131 +43,6 @@ #include "ttm_placement.h" #include "ttm_tt.h" -#define TTM_MAX_BO_PRIORITY 4U - -struct ttm_resource_manager; - -struct ttm_resource_manager_func { - /** - * struct ttm_resource_manager_func member alloc - * - * @man: Pointer to a memory type manager. - * @bo: Pointer to the buffer object we're allocating space for. 
- * @placement: Placement details. - * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_resource to be filled in. - * - * This function should allocate space in the memory type managed - * by @man. Placement details if - * applicable are given by @placement. If successful, - * @mem::mm_node should be set to a non-null value, and - * @mem::start should be set to a value identifying the beginning - * of the range allocated, and the function should return zero. - * If the memory region accommodate the buffer object, @mem::mm_node - * should be set to NULL, and the function should return 0. - * If a system error occurred, preventing the request to be fulfilled, - * the function should return a negative error code. - * - * Note that @mem::mm_node will only be dereferenced by - * struct ttm_resource_manager functions and optionally by the driver, - * which has knowledge of the underlying type. - * - * This function may not be called from within atomic context, so - * an implementation can and must use either a mutex or a spinlock to - * protect any data structures managing the space. - */ - int (*alloc)(struct ttm_resource_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem); - - /** - * struct ttm_resource_manager_func member free - * - * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_resource to be filled in. - * - * This function frees memory type resources previously allocated - * and that are identified by @mem::mm_node and @mem::start. May not - * be called from within atomic context. - */ - void (*free)(struct ttm_resource_manager *man, - struct ttm_resource *mem); - - /** - * struct ttm_resource_manager_func member debug - * - * @man: Pointer to a memory type manager. - * @printer: Prefix to be used in printout to identify the caller. - * - * This function is called to print out the state of the memory - * type manager to aid debugging of out-of-memory conditions. - * It may not be called from within atomic context. - */ - void (*debug)(struct ttm_resource_manager *man, - struct drm_printer *printer); -}; - -/** - * struct ttm_resource_manager - * - * @use_type: The memory type is enabled. - * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory - * managed by this memory type. - * @gpu_offset: If used, the GPU offset of the first managed page of - * fixed memory or the first managed location in an aperture. - * @size: Size of the managed region. - * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h - * @default_caching: The default caching policy used for a buffer object - * placed in this memory type if the user doesn't provide one. - * @func: structure pointer implementing the range manager. See above - * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures - * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions - * reserved by the TTM vm system. - * @io_reserve_lru: Optional lru list for unreserving io mem regions. - * @move_lock: lock for move fence - * static information. bdev::driver::io_mem_free is never used. - * @lru: The lru list for this memory type. - * @move: The fence of the last pipelined move operation. - * - * This structure is used to identify and manage memory types for a device. - */ - - - -struct ttm_resource_manager { - /* - * No protection. Constant from start. 
- */ - bool use_type; - bool use_tt; - uint64_t size; - uint32_t available_caching; - uint32_t default_caching; - const struct ttm_resource_manager_func *func; - struct mutex io_reserve_mutex; - bool use_io_reserve_lru; - spinlock_t move_lock; - - /* - * Protected by @io_reserve_mutex: - */ - - struct list_head io_reserve_lru; - - /* - * Protected by the global->lru_lock. - */ - - struct list_head lru[TTM_MAX_BO_PRIORITY]; - - /* - * Protected by @move_lock. - */ - struct dma_fence *move; -}; - /** * struct ttm_bo_driver * @@ -537,8 +412,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem); - int ttm_bo_device_release(struct ttm_bo_device *bdev); /** @@ -675,59 +548,6 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) dma_resv_unlock(bo->base.resv); } -/** - * ttm_resource_manager_set_used - * - * @man: A memory manager object. - * @used: usage state to set. - * - * Set the manager in use flag. If disabled the manager is no longer - * used for object placement. - */ -static inline void ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) -{ - man->use_type = used; -} - -/** - * ttm_resource_manager_used - * - * @man: Manager to get used state for - * - * Get the in use flag for a manager. - * Returns: - * true is used, false if not. - */ -static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) -{ - return man->use_type; -} - -/** - * ttm_resource_manager_cleanup - * - * @man: A memory manager object. - * - * Cleanup the move fences from the memory manager object. - */ -static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man) -{ - dma_fence_put(man->move); - man->move = NULL; -} - -/* - * ttm_resource_manager_force_list_clean - * - * @bdev - device to use - * @man - manager to use - * - * Force all the objects out of a memory manager until clean. - * Part of memory manager cleanup sequence. - */ -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man); - /* * ttm_bo_util.c */ @@ -874,13 +694,4 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type); -/** - * ttm_resource_manager_debug - * - * @man: manager type to dump. - * @p: printer to use for debug. - */ -void ttm_resource_manager_debug(struct ttm_resource_manager *man, - struct drm_printer *p); - #endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h new file mode 100644 index 000000000000..bac22a56f6cd --- /dev/null +++ b/include/drm/ttm/ttm_resource.h @@ -0,0 +1,263 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_RESOURCE_H_ +#define _TTM_RESOURCE_H_ + +#include +#include +#include +#include + +#define TTM_MAX_BO_PRIORITY 4U + +struct ttm_bo_device; +struct ttm_resource_manager; +struct ttm_resource; +struct ttm_place; +struct ttm_buffer_object; + +struct ttm_resource_manager_func { + /** + * struct ttm_resource_manager_func member alloc + * + * @man: Pointer to a memory type manager. + * @bo: Pointer to the buffer object we're allocating space for. + * @placement: Placement details. + * @flags: Additional placement flags. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function should allocate space in the memory type managed + * by @man. Placement details if + * applicable are given by @placement. If successful, + * @mem::mm_node should be set to a non-null value, and + * @mem::start should be set to a value identifying the beginning + * of the range allocated, and the function should return zero. + * If the memory region accommodate the buffer object, @mem::mm_node + * should be set to NULL, and the function should return 0. + * If a system error occurred, preventing the request to be fulfilled, + * the function should return a negative error code. + * + * Note that @mem::mm_node will only be dereferenced by + * struct ttm_resource_manager functions and optionally by the driver, + * which has knowledge of the underlying type. + * + * This function may not be called from within atomic context, so + * an implementation can and must use either a mutex or a spinlock to + * protect any data structures managing the space. + */ + int (*alloc)(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member free + * + * @man: Pointer to a memory type manager. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function frees memory type resources previously allocated + * and that are identified by @mem::mm_node and @mem::start. May not + * be called from within atomic context. + */ + void (*free)(struct ttm_resource_manager *man, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member debug + * + * @man: Pointer to a memory type manager. + * @printer: Prefix to be used in printout to identify the caller. + * + * This function is called to print out the state of the memory + * type manager to aid debugging of out-of-memory conditions. + * It may not be called from within atomic context. + */ + void (*debug)(struct ttm_resource_manager *man, + struct drm_printer *printer); +}; + +/** + * struct ttm_resource_manager + * + * @use_type: The memory type is enabled. + * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. + * @size: Size of the managed region. 
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, + * as defined in ttm_placement_common.h + * @default_caching: The default caching policy used for a buffer object + * placed in this memory type if the user doesn't provide one. + * @func: structure pointer implementing the range manager. See above + * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures + * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions + * reserved by the TTM vm system. + * @io_reserve_lru: Optional lru list for unreserving io mem regions. + * @move_lock: lock for move fence + * static information. bdev::driver::io_mem_free is never used. + * @lru: The lru list for this memory type. + * @move: The fence of the last pipelined move operation. + * + * This structure is used to identify and manage memory types for a device. + */ +struct ttm_resource_manager { + /* + * No protection. Constant from start. + */ + bool use_type; + bool use_tt; + uint64_t size; + uint32_t available_caching; + uint32_t default_caching; + const struct ttm_resource_manager_func *func; + struct mutex io_reserve_mutex; + bool use_io_reserve_lru; + spinlock_t move_lock; + + /* + * Protected by @io_reserve_mutex: + */ + + struct list_head io_reserve_lru; + + /* + * Protected by the global->lru_lock. + */ + + struct list_head lru[TTM_MAX_BO_PRIORITY]; + + /* + * Protected by @move_lock. + */ + struct dma_fence *move; +}; + +/** + * struct ttm_bus_placement + * + * @addr: mapped virtual address + * @base: bus base address + * @is_iomem: is this io memory ? + * @size: size in byte + * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve + * + * Structure indicating the bus placement of an object. + */ +struct ttm_bus_placement { + void *addr; + phys_addr_t base; + unsigned long size; + unsigned long offset; + bool is_iomem; + bool io_reserved_vm; + uint64_t io_reserved_count; +}; + +/** + * struct ttm_resource + * + * @mm_node: Memory manager node. + * @size: Requested size of memory region. + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @placement: Placement flags. + * @bus: Placement on io bus accessible to the CPU + * + * Structure indicating the placement and space resources used by a + * buffer object. + */ +struct ttm_resource { + void *mm_node; + unsigned long start; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t placement; + struct ttm_bus_placement bus; +}; + +/** + * ttm_resource_manager_set_used + * + * @man: A memory manager object. + * @used: usage state to set. + * + * Set the manager in use flag. If disabled the manager is no longer + * used for object placement. + */ +static inline void +ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) +{ + man->use_type = used; +} + +/** + * ttm_resource_manager_used + * + * @man: Manager to get used state for + * + * Get the in use flag for a manager. + * Returns: + * true is used, false if not. + */ +static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) +{ + return man->use_type; +} + +/** + * ttm_resource_manager_cleanup + * + * @man: A memory manager object. + * + * Cleanup the move fences from the memory manager object. 
+ */ +static inline void +ttm_resource_manager_cleanup(struct ttm_resource_manager *man) +{ + dma_fence_put(man->move); + man->move = NULL; +} + +int ttm_resource_alloc(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res); +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); + +void ttm_resource_manager_init(struct ttm_resource_manager *man, + unsigned long p_size); + +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man); + +void ttm_resource_manager_debug(struct ttm_resource_manager *man, + struct drm_printer *p); + +#endif -- cgit v1.2.3-59-g8ed1b