Diffstat
-rw-r--r--	mm/mempool.c	35	+++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/mm/mempool.c b/mm/mempool.c
index 85efab3da720..96488b13a1ef 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -17,7 +17,6 @@
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
-#include <linux/blkdev.h>
 #include <linux/writeback.h>
 #include "slab.h"
 
@@ -58,11 +57,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->free == mempool_free_pages) {
+	} else if (pool->free == mempool_free_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -82,11 +80,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->alloc == mempool_alloc_pages) {
+	} else if (pool->alloc == mempool_alloc_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -106,17 +103,19 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
-	if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data);
+		kasan_slab_free_mempool(element);
+	else if (pool->alloc == mempool_alloc_pages)
+		kasan_poison_pages(element, (unsigned long)pool->pool_data,
+				   false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_slab(element);
-	if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+		kasan_unpoison_range(element, __ksize(element));
+	else if (pool->alloc == mempool_alloc_pages)
+		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+				     false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
@@ -253,7 +252,7 @@ EXPORT_SYMBOL(mempool_init);
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 			  mempool_free_t *free_fn, void *pool_data)
 {
-	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
+	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
 				   GFP_KERNEL, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(mempool_create);
@@ -380,7 +379,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	gfp_t gfp_temp;
 
 	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
-	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+	might_alloc(gfp_mask);
 
 	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
 	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
@@ -489,7 +488,7 @@ void mempool_free(void *element, mempool_t *pool)
 	 * ensures that there will be frees which return elements to the
 	 * pool waking up the waiters.
 	 */
-	if (unlikely(pool->curr_nr < pool->min_nr)) {
+	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (likely(pool->curr_nr < pool->min_nr)) {
 			add_element(pool, element);
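
Context for the check_element()/poison_element() hunks: a mempool is created with a matching alloc/free callback pair, so the debug code can compare pool->free against mempool_free_slab or mempool_free_pages to learn which allocator backs the pool. Below is a minimal, hypothetical sketch of the two standard pairings; the cache name, object size, and MY_MIN_NR reserve are illustrative, not part of this commit:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define MY_MIN_NR 16				/* illustrative reserve size */

static struct kmem_cache *my_cache;		/* illustrative cache */
static mempool_t *slab_pool, *page_pool;

static int __init my_pools_init(void)
{
	my_cache = kmem_cache_create("my_obj", 128, 0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Slab-backed pool: pool->free == mempool_free_slab,
	 * pool_data is the kmem_cache to allocate from. */
	slab_pool = mempool_create(MY_MIN_NR, mempool_alloc_slab,
				   mempool_free_slab, my_cache);

	/* Page-backed pool: pool->free == mempool_free_pages,
	 * pool_data is the page order cast to a pointer. */
	page_pool = mempool_create(MY_MIN_NR, mempool_alloc_pages,
				   mempool_free_pages, (void *)0);

	if (!slab_pool || !page_pool)
		return -ENOMEM;
	return 0;
}

Because the callback pairs are mutually exclusive for a given pool, the if/else-if rewrite in this diff is a behavior-neutral cleanup: at most one branch can ever match.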
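The final hunk touches the lockless fast path of mempool_free(): pool->curr_nr is read without pool->lock held, so READ_ONCE() is needed to get a single, untorn read of a field that other CPUs update under the lock, while the recheck inside the critical section remains a plain read. A hedged, self-contained sketch of the same check-then-lock-and-recheck shape, with illustrative names standing in for the mempool internals:

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct reserve {
	spinlock_t lock;
	int curr_nr;	/* written only under lock, read locklessly */
	int min_nr;	/* constant after init */
};

/* Keep an element for the reserve if it is running low. */
static bool reserve_refill(struct reserve *r)
{
	unsigned long flags;
	bool added = false;

	/*
	 * Lockless fast path: READ_ONCE() stops the compiler from
	 * tearing or re-reading curr_nr while other CPUs modify it.
	 */
	if (unlikely(READ_ONCE(r->curr_nr) < r->min_nr)) {
		spin_lock_irqsave(&r->lock, flags);
		/* Recheck under the lock before committing. */
		if (likely(r->curr_nr < r->min_nr)) {
			r->curr_nr++;	/* stand-in for add_element() */
			added = true;
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}
	return added;	/* false: caller frees to the base allocator */
}

The unsynchronized read may still observe a stale value; that is acceptable here because the locked recheck makes the final decision, exactly as in mempool_free().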