author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-17 11:29:37 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-17 11:29:37 -0700
commit		1b5044021070efa3259f3e9548dc35d1eb6aa844 (patch)
tree		8c63c3f2a4dd2a52f024457eec9ca56422eb68fe /kernel
parent		Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
parent		dma-pool: decouple DMA_REMAP from DMA_COHERENT_POOL (diff)
Merge tag 'dma-mapping-5.8-3' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
 "Fixes for the SEV atomic pool (Geert Uytterhoeven and David Rientjes)"

* tag 'dma-mapping-5.8-3' of git://git.infradead.org/users/hch/dma-mapping:
  dma-pool: decouple DMA_REMAP from DMA_COHERENT_POOL
  dma-pool: fix too large DMA pools on medium memory size systems
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/dma/Kconfig	10
-rw-r--r--	kernel/dma/pool.c	7
2 files changed, 8 insertions, 9 deletions
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index d006668c0027..a0ce3c1494fd 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -73,18 +73,18 @@ config SWIOTLB
 config DMA_NONCOHERENT_MMAP
 	bool
 
+config DMA_COHERENT_POOL
+	bool
+
 config DMA_REMAP
+	bool
 	depends on MMU
 	select GENERIC_ALLOCATOR
 	select DMA_NONCOHERENT_MMAP
-	bool
-
-config DMA_COHERENT_POOL
-	bool
-	select DMA_REMAP
 
 config DMA_DIRECT_REMAP
 	bool
+	select DMA_REMAP
 	select DMA_COHERENT_POOL
 
 config DMA_CMA
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 35bb51c31fff..8cfa01243ed2 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -175,10 +175,9 @@ static int __init dma_atomic_pool_init(void)
 	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
 	 */
 	if (!atomic_pool_size) {
-		atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
-					SZ_128K;
-		atomic_pool_size = min_t(size_t, atomic_pool_size,
-					 1 << (PAGE_SHIFT + MAX_ORDER-1));
+		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
+		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
+		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
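
For reference, a stand-alone user-space sketch of the sizing arithmetic the second
patch fixes. It is not kernel code: PAGE_SHIFT, MAX_ORDER and the SZ_* constants are
assumed typical x86-64 defaults, and old_size()/new_size() are illustrative names for
the before/after formulas. The old formula shifted the page count by PAGE_SHIFT
(dividing by 4096 rather than by the 262144 pages in 1 GiB), so medium-sized systems
got a pool far larger than the intended 128 KiB per GiB.

	/* Sketch of the old vs. new atomic pool sizing; assumed constants. */
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define MAX_ORDER		11
	#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
	#define SZ_128K			(128UL * 1024)
	#define SZ_1G			(1024UL * 1024 * 1024)

	#define max(a, b)	((a) > (b) ? (a) : (b))
	#define min(a, b)	((a) < (b) ? (a) : (b))

	/* Old formula: page count >> PAGE_SHIFT, then clamped to MAX_ORDER-1. */
	static unsigned long old_size(unsigned long totalram_pages)
	{
		unsigned long size = max(totalram_pages >> PAGE_SHIFT, 1UL) * SZ_128K;

		return min(size, 1UL << (PAGE_SHIFT + MAX_ORDER - 1));
	}

	/* New formula: 128 KiB per 1 GiB of RAM, min 128 KiB, max MAX_ORDER-1. */
	static unsigned long new_size(unsigned long totalram_pages)
	{
		unsigned long pages = totalram_pages / (SZ_1G / SZ_128K);

		pages = min(pages, MAX_ORDER_NR_PAGES);
		return max(pages << PAGE_SHIFT, SZ_128K);
	}

	int main(void)
	{
		unsigned long mem_mib[] = { 256, 1024, 4096, 65536 };

		for (int i = 0; i < 4; i++) {
			unsigned long pages = mem_mib[i] * 1024 * 1024 / PAGE_SIZE;

			printf("%6lu MiB RAM: old %5lu KiB, new %5lu KiB\n",
			       mem_mib[i], old_size(pages) / 1024,
			       new_size(pages) / 1024);
		}
		return 0;
	}

With these assumed constants the sketch prints 2048 KiB vs. 128 KiB for a 256 MiB
system and 4096 KiB vs. 512 KiB for a 4 GiB system, while both formulas converge on
the 4 MiB (MAX_ORDER-1) cap for very large machines.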