author     Thomas Gleixner <tglx@linutronix.de>          2023-05-25 14:57:07 +0200
committer  Andrew Morton <akpm@linux-foundation.org>     2023-06-09 16:25:41 -0700
commit     43d7650234c62201ba3ca5b731226b0b189989a8 (patch)
tree       39e61f957966f409b0010dc8c80ea026f975dcc5 /mm/vmalloc.c
parent     mm/vmalloc: prevent flushing dirty space over and over (diff)
mm/vmalloc: check free space in vmap_block lockless
vb_alloc() unconditionally locks a vmap_block on the free list to check the free space. This can be done locklessly because vmap_block::free never increases; it is only decreased on allocations. Check the free space locklessly, and only if that check succeeds, recheck under the lock.

Link: https://lkml.kernel.org/r/20230525124504.750481992@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
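For illustration, a minimal kernel-style sketch of the pattern this patch applies: an unlocked fast-path check on a monotonically decreasing counter, followed by a recheck under the lock. struct block and take() are hypothetical stand-ins for struct vmap_block and the allocation path in vb_alloc(), not code from the patch.

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct vmap_block. */
struct block {
	spinlock_t lock;
	unsigned long free;	/* never increases after initialization */
};

/* Try to take 'nr' units from @b; returns true on success. */
static bool take(struct block *b, unsigned long nr)
{
	/*
	 * Lockless fast path: 'free' only ever decreases, so a value
	 * that is already too small can never become sufficient again,
	 * and the lock can be skipped entirely.
	 */
	if (READ_ONCE(b->free) < nr)
		return false;

	spin_lock(&b->lock);
	/* Recheck: another CPU may have decreased 'free' meanwhile. */
	if (b->free < nr) {
		spin_unlock(&b->lock);
		return false;
	}
	/* WRITE_ONCE() pairs with the lockless READ_ONCE() above. */
	WRITE_ONCE(b->free, b->free - nr);
	spin_unlock(&b->lock);
	return true;
}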
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ad9a1d9e314f..679112e2ffd2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2168,6 +2168,9 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		unsigned long pages_off;
 
+		if (READ_ONCE(vb->free) < (1UL << order))
+			continue;
+
 		spin_lock(&vb->lock);
 		if (vb->free < (1UL << order)) {
 			spin_unlock(&vb->lock);
@@ -2176,7 +2179,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 
 		pages_off = VMAP_BBMAP_BITS - vb->free;
 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
-		vb->free -= 1UL << order;
+		WRITE_ONCE(vb->free, vb->free - (1UL << order));
 		bitmap_set(vb->used_map, pages_off, (1UL << order));
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
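A note on the design choice: the WRITE_ONCE() on the store side pairs with the new lockless READ_ONCE(), keeping the compiler from tearing or re-fetching the accesses and marking the data race on vmap_block::free as intentional. The pre-check is safe in both directions: a stale, too-large value merely leads to a failed recheck under the lock, while a too-small value can never grow back, since vmap_block::free only decreases, so skipping the block is always correct.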