Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--  lib/percpu_counter.c  11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..3bf4a9984f4c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,14 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disable. The latter is guaranteed by the fact that the slow path
+ * is explicitly protected by an irq-safe spinlock whereas the fast path uses
+ * this_cpu_add which is irq-safe by definition. Hence there is no need to muck
+ * with irq state before calling this one.
+ */
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
@@ -89,7 +96,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
}
preempt_enable();
}
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
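
For context, here is a minimal usage sketch of the renamed percpu_counter_add_batch() interface. It is not part of the commit above; the counter name, batch size of 64, and helper functions are illustrative assumptions. The point it demonstrates is the one made in the added comment: callers need not disable interrupts themselves, since the fast path uses this_cpu_add() and the slow path takes an irq-safe spinlock.

```c
#include <linux/percpu_counter.h>
#include <linux/gfp.h>

/* Hypothetical example counter, not from this commit. */
static struct percpu_counter example_events;

static int example_setup(void)
{
	/* Start at 0; allocation of the per-cpu data may sleep with GFP_KERNEL. */
	return percpu_counter_init(&example_events, 0, GFP_KERNEL);
}

/*
 * Safe to call from process or irq context without local_irq_save(),
 * per the comment added in the diff. The batch of 64 is an arbitrary
 * choice: deltas accumulate per-CPU until they exceed the batch, then
 * fold into the shared count under the irq-safe spinlock (slow path).
 */
static void example_account(s64 delta)
{
	percpu_counter_add_batch(&example_events, delta, 64);
}

static s64 example_total(void)
{
	/* Accurate sum across CPUs; slower than percpu_counter_read(). */
	return percpu_counter_sum(&example_events);
}

static void example_teardown(void)
{
	percpu_counter_destroy(&example_events);
}
```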