Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 500
1 file changed, 383 insertions(+), 117 deletions(-)
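In brief: this patch teaches the TTM page allocator about higher-order allocations. It adds an order field to struct ttm_page_pool, grows NUM_POOLS from four to six with write-combined and uncached huge-page pools, switches the alloc/free paths to alloc_pages()/__free_pages() with an explicit order, scales the shrinker bookkeeping back to base pages, and adds ttm_populate_and_map_pages()/ttm_unmap_and_unpopulate_pages(), which coalesce physically consecutive pages into fewer, larger DMA mappings. Allocation failures that now have a fallback path are demoted from pr_err() to pr_debug() along the way.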
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 871599826773..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -81,6 +81,7 @@ struct ttm_page_pool {
char *name;
unsigned long nfrees;
unsigned long nrefills;
+ unsigned int order;
};
/**
@@ -95,7 +96,7 @@ struct ttm_pool_opts {
unsigned small;
};
-#define NUM_POOLS 4
+#define NUM_POOLS 6
/**
* struct ttm_pool_manager - Holds memory pools for fast allocation
@@ -122,6 +123,8 @@ struct ttm_pool_manager {
struct ttm_page_pool uc_pool;
struct ttm_page_pool wc_pool_dma32;
struct ttm_page_pool uc_pool_dma32;
+ struct ttm_page_pool wc_pool_huge;
+ struct ttm_page_pool uc_pool_huge;
} ;
};
};
@@ -220,6 +223,17 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
#ifndef CONFIG_X86
+static int set_pages_wb(struct page *page, int numpages)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ unmap_page_from_agp(page++);
+#endif
+ return 0;
+}
+
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -256,8 +270,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
/**
* Select the right pool for the requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
- enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+ enum ttm_caching_state cstate)
{
int pool_index;
@@ -269,20 +283,36 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
else
pool_index = 0x1;
- if (flags & TTM_PAGE_FLAG_DMA32)
+ if (flags & TTM_PAGE_FLAG_DMA32) {
+ if (huge)
+ return NULL;
pool_index |= 0x2;
+ } else if (huge) {
+ pool_index |= 0x4;
+ }
+
return &_manager->pools[pool_index];
}
/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+ unsigned int order)
{
- unsigned i;
- if (set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
- for (i = 0; i < npages; ++i)
- __free_page(pages[i]);
+ unsigned int i, pages_nr = (1 << order);
+
+ if (order == 0) {
+ if (set_pages_array_wb(pages, npages))
+ pr_err("Failed to set %d pages to wb!\n", npages);
+ }
+
+ for (i = 0; i < npages; ++i) {
+ if (order > 0) {
+ if (set_pages_wb(pages[i], pages_nr))
+ pr_err("Failed to set %d pages to wb!\n", pages_nr);
+ }
+ __free_pages(pages[i], order);
+ }
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
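With the two huge pools in place, the index composed in ttm_get_pool() is effectively a three-bit selector. A reference sketch of the resulting layout (the pool_names table is illustrative, not part of the patch; the names match those registered in ttm_page_alloc_init() further down):

/* bit 0 (0x1): uncached rather than write-combined caching state
 * bit 1 (0x2): TTM_PAGE_FLAG_DMA32 pools
 * bit 2 (0x4): huge-page pools (mutually exclusive with DMA32:
 *              asking for a huge DMA32 pool returns NULL, so the
 *              caller falls back to the order-0 DMA32 pools)
 */
static const char * const pool_names[NUM_POOLS] = {
	"wc",      /* 0x0 */  "uc",      /* 0x1 */
	"wc dma",  /* 0x2 */  "uc dma",  /* 0x3 */
	"wc huge", /* 0x4 */  "uc huge", /* 0x5 */
};

ttm_pages_put() likewise grows an order parameter: for higher-order entries each array slot frees 1 << order pages, with the caching reset done per entry through the new single-page set_pages_wb() helper.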
@@ -321,7 +351,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -345,7 +375,7 @@ restart:
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
- ttm_pages_put(pages_to_free, freed_pages);
+ ttm_pages_put(pages_to_free, freed_pages, pool->order);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
@@ -380,7 +410,7 @@ restart:
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (freed_pages)
- ttm_pages_put(pages_to_free, freed_pages);
+ ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
if (pages_to_free != static_buf)
kfree(pages_to_free);
@@ -404,6 +434,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
+ unsigned int nr_free_pool;
if (!mutex_trylock(&lock))
return SHRINK_STOP;
@@ -411,12 +442,20 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
+ unsigned page_nr;
+
if (shrink_pages == 0)
break;
+
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ page_nr = (1 << pool->order);
/* OK to use static buffer since global mutex is held. */
- shrink_pages = ttm_page_pool_free(pool, nr_free, true);
- freed += nr_free - shrink_pages;
+ nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
+ shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+ freed += (nr_free_pool - shrink_pages) << pool->order;
+ if (freed >= sc->nr_to_scan)
+ break;
+ shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
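Because a pool entry now covers 1 << order base pages, the shrinker converts the requested nr_free (in pages) into pool entries and converts the result back when crediting freed. A standalone demo of that arithmetic (ROUNDUP mirrors the kernel's roundup(); order 9 matches HPAGE_PMD_ORDER on x86-64 with 4K pages):

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int order = 9;		/* one pool entry = 512 base pages */
	unsigned int nr_free = 100;	/* shrinker request, in base pages */
	unsigned int page_nr = 1u << order;
	unsigned int nr_free_pool = ROUNDUP(nr_free, page_nr) >> order;

	/* prints: 100 pages -> 1 pool entries -> up to 512 pages freed */
	printf("%u pages -> %u pool entries -> up to %u pages freed\n",
	       nr_free, nr_free_pool, nr_free_pool << order);
	return 0;
}

ttm_pool_shrink_count() applies the same scaling in the other direction, reporting pool->npages << pool->order per pool.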
@@ -428,9 +467,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
unsigned long count = 0;
+ struct ttm_page_pool *pool;
- for (i = 0; i < NUM_POOLS; ++i)
- count += _manager->pools[i].npages;
+ for (i = 0; i < NUM_POOLS; ++i) {
+ pool = &_manager->pools[i];
+ count += (pool->npages << pool->order);
+ }
return count;
}
@@ -494,28 +536,29 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* pages returned in pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+ int ttm_flags, enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
struct page **caching_array;
struct page *p;
int r = 0;
- unsigned i, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+ unsigned i, j, cpages;
+ unsigned npages = 1 << order;
+ unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ pr_debug("Unable to allocate table for new pages\n");
return -ENOMEM;
}
for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_page(gfp_flags);
+ p = alloc_pages(gfp_flags, order);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ pr_debug("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
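Note how max_cpages above is now computed from count << order: every constituent base page of a higher-order allocation passes through caching_array, flushed in batches. A toy model of that batching (assuming NUM_PAGES_TO_ALLOC keeps its in-file definition of PAGE_SIZE/sizeof(struct page *), i.e. 512 on a 64-bit 4K-page system):

#include <stdio.h>

#define NUM_PAGES_TO_ALLOC (4096 / sizeof(void *))	/* 512 on 64-bit */

int main(void)
{
	unsigned int count = 4, order = 9, flushes = 0;
	unsigned int i, j;
	size_t cpages = 0;

	for (i = 0; i < count; ++i)			/* per allocation */
		for (j = 0; j < (1u << order); ++j)	/* per base page */
			if (++cpages == NUM_PAGES_TO_ALLOC) {
				++flushes;	/* a ttm_set_pages_caching() call */
				cpages = 0;
			}
	if (cpages)
		++flushes;			/* final partial batch */

	/* prints: 4 flushes for 4 order-9 allocations */
	printf("%u flushes for %u order-%u allocations\n", flushes, count, order);
	return 0;
}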
@@ -531,14 +574,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
goto out;
}
+ list_add(&p->lru, pages);
+
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so we
* should be fine in such a case
*/
- if (!PageHighMem(p))
+ if (PageHighMem(p))
+ continue;
+
#endif
- {
- caching_array[cpages++] = p;
+ for (j = 0; j < npages; ++j) {
+ caching_array[cpages++] = p++;
if (cpages == max_cpages) {
r = ttm_set_pages_caching(caching_array,
@@ -552,8 +599,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
cpages = 0;
}
}
-
- list_add(&p->lru, pages);
}
if (cpages) {
@@ -573,9 +618,9 @@ out:
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count,
- unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned long *irq_flags)
{
struct page *p;
int r;
@@ -605,7 +650,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
INIT_LIST_HEAD(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size);
+ cstate, alloc_size, 0);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
@@ -613,7 +658,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ pr_debug("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &new_pages, lru) {
++cpages;
@@ -627,22 +672,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
*
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
*/
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
unsigned long irq_flags;
struct list_head *p;
unsigned i;
+ int r = 0;
spin_lock_irqsave(&pool->lock, irq_flags);
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+ if (!order)
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+ &irq_flags);
if (count >= pool->npages) {
/* take all pages from the pool */
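The contract of ttm_page_pool_get_pages() changes here: instead of handing back the number of pages still missing for the caller to allocate, it now clears pooled pages itself when TTM_PAGE_FLAG_ZERO_ALLOC is set, performs the fallback allocation internally, and returns zero or a negative error code. A sketch of a caller under the new contract (condensed from ttm_get_pages() below):

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages, 0);
	if (r)
		return r;	/* error code, not a remaining count */

	list_for_each_entry(p, &plist, lru)
		pages[count++] = p;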
@@ -672,32 +720,128 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
count = 0;
out:
spin_unlock_irqrestore(&pool->lock, irq_flags);
- return count;
+
+ /* clear the pages coming from the pool if requested */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ struct page *page;
+
+ list_for_each_entry(page, pages, lru) {
+ if (PageHighMem(page))
+ clear_highpage(page);
+ else
+ clear_page(page_address(page));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count) {
+ gfp_t gfp_flags = pool->gfp_flags;
+
+ /* set zero flag for page allocation if required */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+ count, order);
+ }
+
+ return r;
}
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
+ i = 0;
+ while (i < npages) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct page *p = pages[i];
+#endif
+ unsigned order = 0, j;
+
+ if (!pages[i]) {
+ ++i;
+ continue;
+ }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+ }
+#endif
+
+ if (page_count(pages[i]) != 1)
+ pr_err("Erroneous page count. Leaking pages.\n");
+ __free_pages(pages[i], order);
+
+ j = 1 << order;
+ while (j) {
+ pages[i++] = NULL;
+ --j;
}
}
return;
}
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge) {
+ unsigned max_size, n2free;
+
+ spin_lock_irqsave(&huge->lock, irq_flags);
+ while (i < npages) {
+ struct page *p = pages[i];
+ unsigned j;
+
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j != HPAGE_PMD_NR)
+ break;
+
+ list_add_tail(&pages[i]->lru, &huge->list);
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = NULL;
+ huge->npages++;
+ }
+
+ /* Check that we don't go over the pool limit */
+ max_size = _manager->options.max_size;
+ max_size /= HPAGE_PMD_NR;
+ if (huge->npages > max_size)
+ n2free = huge->npages - max_size;
+ else
+ n2free = 0;
+ spin_unlock_irqrestore(&huge->lock, irq_flags);
+ if (n2free)
+ ttm_page_pool_free(huge, n2free, false);
+ }
+#endif
+
spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
+ while (i < npages) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +849,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
pages[i] = NULL;
pool->npages++;
}
+ ++i;
}
/* Check that we don't go over the pool limit */
npages = 0;
@@ -727,82 +872,117 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
struct list_head plist;
struct page *p = NULL;
- gfp_t gfp_flags = GFP_USER;
- unsigned count;
+ unsigned count, first;
int r;
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
/* No pool for cached pages */
if (pool == NULL) {
+ gfp_t gfp_flags = GFP_USER;
+ unsigned i;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned j;
+#endif
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < npages; ++r) {
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (!(gfp_flags & GFP_DMA32)) {
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
+
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
+
+ npages -= HPAGE_PMD_NR;
+ }
+ }
+#endif
+
+ first = i;
+ while (npages) {
p = alloc_page(gfp_flags);
if (!p) {
-
- pr_err("Unable to allocate page\n");
+ pr_debug("Unable to allocate page\n");
return -ENOMEM;
}
- pages[r] = p;
+ /* Swap the pages if we detect consecutive order */
+ if (i > first && pages[i - 1] == p - 1)
+ swap(p, pages[i - 1]);
+
+ pages[i++] = p;
+ --npages;
}
return 0;
}
- /* combine zero flag to pool flags */
- gfp_flags |= pool->gfp_flags;
-
- /* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- /* clear the pages coming from the pool if requested */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge && npages >= HPAGE_PMD_NR) {
+ INIT_LIST_HEAD(&plist);
+ ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+ npages / HPAGE_PMD_NR,
+ HPAGE_PMD_ORDER);
+
list_for_each_entry(p, &plist, lru) {
- if (PageHighMem(p))
- clear_highpage(p);
- else
- clear_page(page_address(p));
+ unsigned j;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[count++] = &p[j];
}
}
+#endif
- /* If pool didn't have enough pages allocate new one. */
- if (npages > 0) {
- /* ttm_alloc_new_pages doesn't reference pool so we can run
- * multiple requests in parallel.
- **/
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- if (r) {
- /* If there is any pages in the list put them back to
- * the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
+ INIT_LIST_HEAD(&plist);
+ r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+ npages - count, 0);
+
+ first = count;
+ list_for_each_entry(p, &plist, lru) {
+ struct page *tmp = p;
+
+ /* Swap the pages if we detect consecutive order */
+ if (count > first && pages[count - 1] == tmp - 1)
+ swap(tmp, pages[count - 1]);
+ pages[count++] = tmp;
+ }
+
+ if (r) {
+ /* If there are any pages in the list, put them back to
+ * the pool.
+ */
+ pr_debug("Failed to allocate extra pages for large request\n");
+ ttm_put_pages(pages, count, flags, cstate);
+ return r;
}
return 0;
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
- char *name)
+ char *name, unsigned int order)
{
spin_lock_init(&pool->lock);
pool->fill_lock = false;
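Both the direct-allocation path above and the pool setup below build the huge-page gfp mask the same way: start from GFP_TRANSHUGE, then drop __GFP_MOVABLE (presumably because TTM pages are pinned for device access and must not be migrated) and __GFP_COMP (the pool hands the constituent pages out individually, so no compound head is wanted). A single-expression reading of the flag dance above, for reference:

	gfp_t huge_flags = (gfp_flags | GFP_TRANSHUGE) &
			   ~(__GFP_MOVABLE | __GFP_COMP);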
@@ -810,27 +990,43 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
pool->npages = pool->nfrees = 0;
pool->gfp_flags = flags;
pool->name = name;
+ pool->order = order;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned order = HPAGE_PMD_ORDER;
+#else
+ unsigned order = 0;
+#endif
WARN_ON(_manager);
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+ ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+ ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma");
+ GFP_USER | GFP_DMA32, "wc dma", 0);
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma");
+ GFP_USER | GFP_DMA32, "uc dma", 0);
+
+ ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+ "wc huge", order);
+
+ ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+ , "uc huge", order);
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
@@ -873,17 +1069,16 @@ int ttm_pool_populate(struct ttm_tt *ttm)
if (ttm->state != tt_unpopulated)
return 0;
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
+ ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
+ PAGE_SIZE);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
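Populating the whole ttm in one ttm_get_pages() call, rather than page by page as before, is what lets the allocator see requests large enough to be satisfied from the huge pools; the per-page accounting then follows with an explicit PAGE_SIZE argument, matching the updated ttm_mem_global_alloc_page() signature.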
@@ -908,18 +1103,89 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
unsigned i;
for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
+ if (!ttm->pages[i])
+ continue;
+
+ ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
+ PAGE_SIZE);
}
+ ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+ int r;
+
+ r = ttm_pool_populate(&tt->ttm);
+ if (r)
+ return r;
+
+ for (i = 0; i < tt->ttm.num_pages; ++i) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ 0, num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, tt->dma_address[i])) {
+ while (i--) {
+ dma_unmap_page(dev, tt->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ tt->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+ return -EFAULT;
+ }
+
+ for (j = 1; j < num_pages; ++j) {
+ tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
+ ++i;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_populate_and_map_pages);
+
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+
+ for (i = 0; i < tt->ttm.num_pages;) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+ ++i;
+ continue;
+ }
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ i += num_pages;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+}
+EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
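For drivers whose hardware is cache coherent, the two exports added above offer a middle ground between the plain pool and the coherent DMA pool: pages come from the (possibly huge) pool and are then streamed through dma_map_page(), with physically consecutive pages coalesced into fewer, larger mappings. A hypothetical driver hookup (the mydrv_* names and struct are invented for illustration; the cast works because struct ttm_dma_tt embeds struct ttm_tt first):

struct mydrv_ttm_tt {
	struct ttm_dma_tt ttm;		/* must come first */
	struct device *dev;
};

static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *gtt = (struct mydrv_ttm_tt *)ttm;

	return ttm_populate_and_map_pages(gtt->dev, &gtt->ttm);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *gtt = (struct mydrv_ttm_tt *)ttm;

	ttm_unmap_and_unpopulate_pages(gtt->dev, &gtt->ttm);
}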
@@ -929,12 +1195,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
seq_printf(m, "No pool allocator running.\n");
return 0;
}
- seq_printf(m, "%6s %12s %13s %8s\n",
+ seq_printf(m, "%7s %12s %13s %8s\n",
h[0], h[1], h[2], h[3]);
for (i = 0; i < NUM_POOLS; ++i) {
p = &_manager->pools[i];
- seq_printf(m, "%6s %12ld %13ld %8d\n",
+ seq_printf(m, "%7s %12ld %13ld %8d\n",
p->name, p->nrefills,
p->nfrees, p->npages);
}