Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc_dma.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 99
1 file changed, 54 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 6b2627fe9bc1..a88051552ace 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -61,6 +61,7 @@
 #define SMALL_ALLOCATION          4
 #define FREE_ALL_PAGES            (~0U)
 #define VADDR_FLAG_HUGE_POOL      1UL
+#define VADDR_FLAG_UPDATED_COUNT  2UL
 
 enum pool_type {
         IS_UNDEFINED = 0,
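
Note: the new flag shares storage with the page's kernel virtual address. dma_alloc_attrs() returns memory that is at least page aligned, so the two low bits of vaddr are always zero and are free to carry per-page state (VADDR_FLAG_HUGE_POOL in bit 0, VADDR_FLAG_UPDATED_COUNT in bit 1). A minimal sketch of the idiom; VADDR_FLAG_MASK and ttm_dma_page_vaddr() are hypothetical names, not part of this file:

#define VADDR_FLAG_MASK (VADDR_FLAG_HUGE_POOL | VADDR_FLAG_UPDATED_COUNT)

/* Hypothetical helper: strip the flag bits to recover the real pointer. */
static inline void *ttm_dma_page_vaddr(unsigned long vaddr)
{
        return (void *)(vaddr & ~VADDR_FLAG_MASK);
}
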
@@ -333,14 +334,18 @@ static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
 {
         struct dma_page *d_page;
+        unsigned long attrs = 0;
         void *vaddr;
 
         d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
         if (!d_page)
                 return NULL;
 
-        vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
-                                   pool->gfp_flags);
+        if (pool->type & IS_HUGE)
+                attrs = DMA_ATTR_NO_WARN;
+
+        vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
+                                pool->gfp_flags, attrs);
         if (vaddr) {
                 if (is_vmalloc_addr(vaddr))
                         d_page->p = vmalloc_to_page(vaddr);
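
Note: dma_alloc_coherent() is dma_alloc_attrs() with attrs == 0, so this rewrite is behaviour-neutral for the ordinary pools; only huge pools pass DMA_ATTR_NO_WARN. That attribute suppresses the kernel's allocation-failure warning, which is appropriate here because a failed huge-page allocation is an expected event that the caller handles by falling back to single pages. The equivalence, for reference:

/* These two calls behave identically: */
vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma, pool->gfp_flags);
vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma, pool->gfp_flags, 0);
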
@@ -870,18 +875,18 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 }
 
 /*
- * @return count of pages still required to fulfill the request.
  * The populate list is actually a stack (not that it matters, as TTM
  * allocates one page at a time).
+ * Return the dma_page pointer on success, otherwise NULL.
  */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
                                   struct ttm_dma_tt *ttm_dma,
                                   unsigned index)
 {
-        struct dma_page *d_page;
+        struct dma_page *d_page = NULL;
         struct ttm_tt *ttm = &ttm_dma->ttm;
         unsigned long irq_flags;
-        int count, r = -ENOMEM;
+        int count;
 
         spin_lock_irqsave(&pool->lock, irq_flags);
         count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
@@ -890,12 +895,11 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
                 ttm->pages[index] = d_page->p;
                 ttm_dma->dma_address[index] = d_page->dma;
                 list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
-                r = 0;
                 pool->npages_in_use += 1;
                 pool->npages_free -= 1;
         }
         spin_unlock_irqrestore(&pool->lock, irq_flags);
-        return r;
+        return d_page;
 }
 
 static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
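
Note: returning the struct dma_page * (NULL on failure) instead of an int gives callers a handle to the page as well as the error signal; the populate paths below need that handle to mark the page with VADDR_FLAG_UPDATED_COUNT once the global accounting succeeds. The resulting caller pattern, taken from the hunks below:

d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (!d_page) {          /* previously: ret != 0 */
        ttm_dma_unpopulate(ttm_dma, dev);
        return -ENOMEM;
}
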
@@ -923,12 +927,14 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success the pages list will hold count correctly cached pages.
  * On failure it will hold the negative return value (-ENOMEM, etc.).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+                     struct ttm_operation_ctx *ctx)
 {
         struct ttm_tt *ttm = &ttm_dma->ttm;
         struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
         unsigned long num_pages = ttm->num_pages;
         struct dma_pool *pool;
+        struct dma_page *d_page;
         enum pool_type type;
         unsigned i;
         int ret;
@@ -957,17 +963,18 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
         while (num_pages >= HPAGE_PMD_NR) {
                 unsigned j;
 
-                ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-                if (ret != 0)
+                d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+                if (!d_page)
                         break;
 
                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                                pool->size);
+                                                pool->size, ctx);
                 if (unlikely(ret != 0)) {
                         ttm_dma_unpopulate(ttm_dma, dev);
                         return -ENOMEM;
                 }
 
+                d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
                 for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
                         ttm->pages[j] = ttm->pages[j - 1] + 1;
                         ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
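
Note: a huge-pool hit fills HPAGE_PMD_NR slots from one physically contiguous allocation, so the remaining entries are derived arithmetically instead of being allocated: pages[i + k] == pages[i] + k and dma_address[i + k] == dma_address[i] + k * PAGE_SIZE for k = 1 .. HPAGE_PMD_NR - 1. On x86-64, with 4 KiB pages and HPAGE_PMD_NR = 512, each iteration covers one 2 MiB chunk. Only the first page of the chunk has a dma_page, which is why the accounting flag is set once per chunk.
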
@@ -991,19 +998,20 @@ skip_huge:
         }
 
         while (num_pages) {
-                ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-                if (ret != 0) {
+                d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+                if (!d_page) {
                         ttm_dma_unpopulate(ttm_dma, dev);
                         return -ENOMEM;
                 }
 
                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                                pool->size);
+                                                pool->size, ctx);
                 if (unlikely(ret != 0)) {
                         ttm_dma_unpopulate(ttm_dma, dev);
                         return -ENOMEM;
                 }
 
+                d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
                 ++i;
                 --num_pages;
         }
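
Note: in both loops VADDR_FLAG_UPDATED_COUNT is set only after ttm_mem_global_alloc_page() succeeds, establishing the invariant that a page carries the flag if and only if it was charged to the global accounting. A compressed sketch of the pairing (simplified, not the patch itself; the unwind label is hypothetical):

ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pool->size, ctx);
if (ret)
        goto unwind;                            /* allocated but never charged */
d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;      /* charged: refund on free */

This is what lets ttm_dma_unpopulate() refund exactly the charged pages when populate fails halfway.
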
@@ -1044,8 +1052,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
                         continue;
 
                 count++;
-                ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                         d_page->p, pool->size);
+                if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+                        ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                 d_page->p, pool->size);
+                        d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+                }
                 ttm_dma_page_put(pool, d_page);
         }
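
Note: clearing the flag immediately after the refund matters because the dma_page can outlive this loop on the pool's free list, where a stale flag would trigger a second refund on the next teardown. The guard-refund-clear idiom appears twice in this patch; a hypothetical helper makes it explicit (illustration only, not part of the patch):

/* Hypothetical helper: refund the global accounting at most once. */
static void ttm_dma_page_refund(struct ttm_mem_global *glob,
                                struct dma_page *d_page, unsigned size)
{
        if (!(d_page->vaddr & VADDR_FLAG_UPDATED_COUNT))
                return;         /* never charged, nothing to refund */
        ttm_mem_global_free_page(glob, d_page->p, size);
        d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
}
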
@@ -1065,9 +1076,19 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
         /* make sure the pages array matches the list and count the pages */
         count = 0;
-        list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+        list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+                                 page_list) {
                 ttm->pages[count] = d_page->p;
                 count++;
+
+                if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+                        ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                 d_page->p, pool->size);
+                        d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+                }
+
+                if (is_cached)
+                        ttm_dma_page_put(pool, d_page);
         }
 
         spin_lock_irqsave(&pool->lock, irq_flags);
@@ -1087,19 +1108,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
         }
         spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-        if (is_cached) {
-                list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
-                        ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                 d_page->p, pool->size);
-                        ttm_dma_page_put(pool, d_page);
-                }
-        } else {
-                for (i = 0; i < count; i++) {
-                        ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                 ttm->pages[i], pool->size);
-                }
-        }
-
         INIT_LIST_HEAD(&ttm_dma->pages_list);
         for (i = 0; i < ttm->num_pages; i++) {
                 ttm->pages[i] = NULL;
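
Note: this removed block is the counterpart of the additions above. The refund that used to run here unconditionally, in a second pass over the list (or over the pages array in the non-cached case), now happens during the single list walk and only for pages flagged as charged. That saves a traversal and, more importantly, stops refunding pages that were allocated but never successfully charged.
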
@@ -1177,12 +1185,12 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
         return count;
 }
 
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
         manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
         manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
         manager->mm_shrink.seeks = 1;
-        register_shrinker(&manager->mm_shrink);
+        return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
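
Note: register_shrinker() returns an int and can fail with -ENOMEM (it allocates the shrinker's internal deferred-work counters), so ignoring its result could leave the shrinker silently unregistered. Propagating the value turns the helper into the usual 0-or-negative-errno pattern, which the caller now checks:

ret = ttm_dma_pool_mm_shrink_init(_manager);
if (unlikely(ret != 0))
        goto error;
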
@@ -1192,7 +1200,7 @@ static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 
 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
-        int ret = -ENOMEM;
+        int ret;
 
         WARN_ON(_manager);
@@ -1200,7 +1208,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
         _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
         if (!_manager)
-                goto err;
+                return -ENOMEM;
 
         mutex_init(&_manager->lock);
         INIT_LIST_HEAD(&_manager->pools);
@@ -1212,13 +1220,17 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
         /* This takes care of auto-freeing the _manager */
         ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                    &glob->kobj, "dma_pool");
-        if (unlikely(ret != 0)) {
-                kobject_put(&_manager->kobj);
-                goto err;
-        }
-        ttm_dma_pool_mm_shrink_init(_manager);
+        if (unlikely(ret != 0))
+                goto error;
+
+        ret = ttm_dma_pool_mm_shrink_init(_manager);
+        if (unlikely(ret != 0))
+                goto error;
         return 0;
-err:
+
+error:
+        kobject_put(&_manager->kobj);
+        _manager = NULL;
         return ret;
 }
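
Note: the unified error path leans on kobject semantics: after kobject_init_and_add(), even a failed one, the object must be released with kobject_put(), and the kobject's release callback is what frees _manager (the "auto-freeing" the comment refers to), so the error path must not kfree() it separately. Setting _manager back to NULL keeps the WARN_ON(_manager) check at the top of the function meaningful after a failed init. A sketch of the release callback this relies on, following the usual pattern for such managers; the exact body is assumed, not quoted from the file:

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);

        kfree(m);       /* frees _manager once the last reference drops */
}
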
@@ -1244,15 +1256,12 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 {
         struct device_pools *p;
         struct dma_pool *pool = NULL;
-        char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
-                     "name", "virt", "busaddr"};
 
         if (!_manager) {
                 seq_printf(m, "No pool allocator running.\n");
                 return 0;
         }
-        seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
-                   h[0], h[1], h[2], h[3], h[4], h[5]);
+        seq_printf(m, " pool refills pages freed inuse available name\n");
         mutex_lock(&_manager->lock);
         list_for_each_entry(p, &_manager->pools, pools) {
                 struct device *dev = p->dev;