author    Manfred Spraul <manfred@colorfullife.com>    2022-12-16 16:04:40 +0100
committer Andrew Morton <akpm@linux-foundation.org>    2023-02-02 22:50:01 -0800
commit    88ad32a799ddc92eafd2ae204cb43f04ac20a05c
tree      a4797d1b353073c535eba0f4c05e1cb758ed7337 /include/linux/percpu_counter.h
parent    lib/percpu_counter: percpu_counter_add_batch() overflow/underflow
include/linux/percpu_counter.h: race in uniprocessor percpu_counter_add()
The percpu interface is supposed to be preempt and irq safe.  But the
uniprocessor implementation of percpu_counter_add() is not irq safe: if an
interrupt happens during the +=, then the result is undefined.

Therefore, switch from preempt_disable() to local_irq_save().  This prevents
interrupts from interrupting the +=, and as a side effect prevents preemption.

Link: https://lkml.kernel.org/r/20221216150441.200533-2-manfred@colorfullife.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: "Sun, Jiebin" <jiebin.sun@intel.com>
Cc: <1vier1@web.de>
Cc: Alexander Sverdlin <alexander.sverdlin@siemens.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
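For illustration, a sketch of the pre-patch !CONFIG_SMP implementation,
reconstructed from the diff context below and annotated with where the race
window lies; the annotation is mine, not part of the patch:

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	/*
	 * The += is a load/modify/store sequence.  An interrupt that
	 * arrives between the load and the store may itself call
	 * percpu_counter_add(); its update is overwritten when this
	 * context stores its stale value back.  preempt_disable()
	 * only keeps the scheduler away, it does not mask interrupts.
	 */
	fbc->count += amount;
	preempt_enable();
}

Disabling interrupts with local_irq_save()/local_irq_restore() closes this
window, and because interrupts are off the task also cannot be preempted.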
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--    include/linux/percpu_counter.h    6
1 file changed, 4 insertions, 2 deletions
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index a3aae8d57a42..521a733e21a9 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -152,9 +152,11 @@ __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	fbc->count += amount;
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
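As a usage illustration of the scenario the fix covers, the sketch below
updates one counter from both task context and an interrupt handler on a
uniprocessor kernel; the counter, handler, and function names are
hypothetical and not taken from the patch:

#include <linux/percpu_counter.h>
#include <linux/interrupt.h>

static struct percpu_counter demo_events;	/* hypothetical counter */

/* Interrupt context: can fire in the middle of a task-context add. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	percpu_counter_add(&demo_events, 1);
	return IRQ_HANDLED;
}

/*
 * Task context: with the old preempt_disable()-based UP variant, an
 * interrupt between the load and the store of the += could lose the
 * handler's increment; with local_irq_save() the whole
 * read-modify-write runs with interrupts masked.
 */
static void demo_account(s64 n)
{
	percpu_counter_add(&demo_events, n);
}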