Diffstat (limited to 'kernel/dma/pool.c')
 kernel/dma/pool.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 35bb51c31fff..39ca26fa41b5 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -175,10 +175,9 @@ static int __init dma_atomic_pool_init(void)
 	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
 	 */
 	if (!atomic_pool_size) {
-		atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
-					SZ_128K;
-		atomic_pool_size = min_t(size_t, atomic_pool_size,
-					 1 << (PAGE_SHIFT + MAX_ORDER-1));
+		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
+		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
+		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
 
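The new sizing code above scales the default atomic pool to roughly 128 KiB per 1 GiB of RAM, clamped between a 128 KiB floor and a single MAX_ORDER-1 allocation. As a rough userspace sketch of that arithmetic only (not kernel code), assuming PAGE_SHIFT = 12, MAX_ORDER = 11 and the usual SZ_* byte values; the default_pool_size() helper is just a name for this illustration:

/* Userspace sketch of the default-size heuristic; the constants below
 * (PAGE_SHIFT, MAX_ORDER, SZ_*) are assumptions mirroring a common
 * x86-64 configuration, not values taken from the patch itself. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
#define SZ_128K			(128UL * 1024)
#define SZ_1G			(1024UL * 1024 * 1024)

static size_t default_pool_size(unsigned long totalram_pages)
{
	/* 128 KiB per 1 GiB of memory: one pool page per 8192 RAM pages */
	unsigned long pages = totalram_pages / (SZ_1G / SZ_128K);
	size_t size;

	/* Cap at a single MAX_ORDER-1 allocation (4 MiB with these values) */
	if (pages > MAX_ORDER_NR_PAGES)
		pages = MAX_ORDER_NR_PAGES;
	size = (size_t)pages << PAGE_SHIFT;

	/* ...and never drop below the 128 KiB floor */
	return size > SZ_128K ? size : SZ_128K;
}

int main(void)
{
	unsigned long ram_gib[] = { 1, 4, 64, 256 };

	for (size_t i = 0; i < sizeof(ram_gib) / sizeof(ram_gib[0]); i++) {
		unsigned long pages = ram_gib[i] * (SZ_1G / PAGE_SIZE);

		printf("%4lu GiB RAM -> %6zu KiB pool\n", ram_gib[i],
		       default_pool_size(pages) / 1024);
	}
	return 0;
}

With those assumed constants, 1 GiB of RAM stays at the 128 KiB floor, 4 GiB yields a 512 KiB pool, and anything from 64 GiB up is capped at the 4 MiB MAX_ORDER-1 ceiling.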
@@ -240,12 +239,16 @@ void *dma_alloc_from_pool(struct device *dev, size_t size,
 	}
 
 	val = gen_pool_alloc(pool, size);
-	if (val) {
+	if (likely(val)) {
 		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
 		*ret_page = pfn_to_page(__phys_to_pfn(phys));
 		ptr = (void *)val;
 		memset(ptr, 0, size);
+	} else {
+		WARN_ONCE(1, "DMA coherent pool depleted, increase size "
+			  "(recommended min coherent_pool=%zuK)\n",
+			  gen_pool_size(pool) >> 9);
 	}
 	if (gen_pool_avail(pool) < atomic_pool_size)
 		schedule_work(&atomic_pool_work);
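In the added WARN_ONCE, the pool size is shifted right by 9 rather than 10, so the "recommended min" printed is twice the current pool size in KiB; the message appears intended to nudge users toward doubling the coherent_pool= boot parameter. A minimal sketch of just that conversion, with pool_bytes standing in for gen_pool_size(pool) as an assumed example value:

/* Sketch of the recommendation arithmetic in the WARN_ONCE above;
 * pool_bytes is an assumed example, not read from a real gen_pool. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t pool_bytes = 256 * 1024;		/* assume a 256 KiB pool */

	/* bytes >> 10 is the current size in KiB; bytes >> 9 is double
	 * that, i.e. the suggested coherent_pool= value. */
	printf("current pool: %zuK\n", pool_bytes >> 10);
	printf("recommended min coherent_pool=%zuK\n", pool_bytes >> 9);
	return 0;
}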