author	Tejun Heo <tj@kernel.org>	2014-09-08 09:51:29 +0900
committer	Tejun Heo <tj@kernel.org>	2014-09-08 09:51:29 +0900
commit	ebd8fef304f99da84d4a52ad056f6137ac9652d4 (patch)
tree	8fc0366c43264d684b0ade18f82c00182db26040
parent	percpu: implement asynchronous chunk population (diff)
download	linux-dev-ebd8fef304f99da84d4a52ad056f6137ac9652d4.tar.xz
	linux-dev-ebd8fef304f99da84d4a52ad056f6137ac9652d4.zip
percpu_counter: make percpu_counters_lock irq-safe
percpu_counter is scheduled to grow @gfp support to allow atomic initialization. This patch makes percpu_counters_lock irq-safe so that it can be safely used from atomic contexts.

Signed-off-by: Tejun Heo <tj@kernel.org>
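Background for the change (not part of the patch text): a plain spin_lock() is only safe when the lock is never taken with interrupts disabled on the holder's behalf; once the lock may be taken from atomic or interrupt context, the irqsave variant is needed to avoid deadlocking against an interrupt that arrives while the lock is held. A minimal sketch of the pattern this patch adopts, using hypothetical my_lock, my_list and add_entry names rather than anything from this file:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Sketch only; my_lock, my_list and add_entry are hypothetical. */
	static DEFINE_SPINLOCK(my_lock);
	static LIST_HEAD(my_list);

	static void add_entry(struct list_head *entry)
	{
		unsigned long flags;

		/*
		 * Disable local interrupts and save their previous state, so
		 * the critical section is correct whether the caller runs
		 * with interrupts enabled or already disabled (i.e. from an
		 * atomic context).
		 */
		spin_lock_irqsave(&my_lock, flags);
		list_add(entry, &my_list);
		spin_unlock_irqrestore(&my_lock, flags);
	}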
-rw-r--r--	lib/percpu_counter.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..3fde78275cd1 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
 {
+	unsigned long flags __maybe_unused;
+
 	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_add(&fbc->list, &percpu_counters);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	return 0;
 }
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
+	unsigned long flags __maybe_unused;
+
 	if (!fbc->counters)
 		return;
 
 	debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_del(&fbc->list);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	free_percpu(fbc->counters);
 	fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irq(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }
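Two details of the patch worth noting. The hotplug callback uses spin_lock_irq() rather than spin_lock_irqsave() because it always runs in process context with interrupts enabled, so there is no prior interrupt state to preserve. And the new flags variables are marked __maybe_unused because they are only referenced when CONFIG_HOTPLUG_CPU is set. As for what the change enables: once the scheduled @gfp support lands, a counter should be initializable from a context that cannot sleep. A sketch under the assumption that the follow-up adds a gfp_t parameter to percpu_counter_init(); the counter name and helper are hypothetical:

	#include <linux/gfp.h>
	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_things;	/* hypothetical counter */

	/*
	 * Assumed follow-up API: percpu_counter_init(fbc, amount, gfp).
	 * GFP_ATOMIC would let the per-CPU allocation proceed without
	 * sleeping, which is what the irq-safe percpu_counters_lock
	 * makes possible.
	 */
	static int init_counter_atomically(void)
	{
		return percpu_counter_init(&nr_things, 0, GFP_ATOMIC);
	}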