Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.locks          | 202
-rw-r--r--  kernel/Makefile               |   2
-rw-r--r--  kernel/capability.c           |  15
-rw-r--r--  kernel/cgroup.c               |   8
-rw-r--r--  kernel/exit.c                 |   4
-rw-r--r--  kernel/fork.c                 |   2
-rw-r--r--  kernel/futex.c                |  20
-rw-r--r--  kernel/hung_task.c            |   2
-rw-r--r--  kernel/irq/spurious.c         |   2
-rw-r--r--  kernel/kmod.c                 |   8
-rw-r--r--  kernel/kthread.c              |  23
-rw-r--r--  kernel/module.c               |   5
-rw-r--r--  kernel/mutex.c                |   4
-rw-r--r--  kernel/params.c               |  17
-rw-r--r--  kernel/perf_event.c           |  68
-rw-r--r--  kernel/power/hibernate.c      |  11
-rw-r--r--  kernel/power/swap.c           |  43
-rw-r--r--  kernel/printk.c               |   7
-rw-r--r--  kernel/rcupdate.c             | 122
-rw-r--r--  kernel/rcutiny.c              | 282
-rw-r--r--  kernel/rcutorture.c           |  65
-rw-r--r--  kernel/rcutree.c              | 493
-rw-r--r--  kernel/rcutree.h              |  84
-rw-r--r--  kernel/rcutree_plugin.h       | 343
-rw-r--r--  kernel/rcutree_trace.c        |  12
-rw-r--r--  kernel/sched.c                |  68
-rw-r--r--  kernel/sched_fair.c           |  74
-rw-r--r--  kernel/signal.c               |  46
-rw-r--r--  kernel/slow-work-debugfs.c    | 227
-rw-r--r--  kernel/slow-work.c            | 512
-rw-r--r--  kernel/slow-work.h            |  72
-rw-r--r--  kernel/smp.c                  |  56
-rw-r--r--  kernel/softirq.c              |   2
-rw-r--r--  kernel/spinlock.c             | 310
-rw-r--r--  kernel/srcu.c                 |  74
-rw-r--r--  kernel/sys.c                  |  25
-rw-r--r--  kernel/sysctl.c               |   3
-rw-r--r--  kernel/sysctl_check.c         |   2
-rw-r--r--  kernel/trace/ftrace.c         |   8
-rw-r--r--  kernel/trace/ring_buffer.c    |  14
-rw-r--r--  kernel/trace/trace.c          |   8
-rw-r--r--  kernel/trace/trace_output.c   |   5
-rw-r--r--  kernel/user.c                 |   2
-rw-r--r--  kernel/workqueue.c            |  17
44 files changed, 2604 insertions(+), 765 deletions(-)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
+#
+# The ARCH_INLINE foo is necessary because select ignores "depends on"
+#
+config ARCH_INLINE_SPIN_TRYLOCK
+ bool
+
+config ARCH_INLINE_SPIN_TRYLOCK_BH
+ bool
+
+config ARCH_INLINE_SPIN_LOCK
+ bool
+
+config ARCH_INLINE_SPIN_LOCK_BH
+ bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQ
+ bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQSAVE
+ bool
+
+config ARCH_INLINE_SPIN_UNLOCK
+ bool
+
+config ARCH_INLINE_SPIN_UNLOCK_BH
+ bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQ
+ bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+ bool
+
+
+config ARCH_INLINE_READ_TRYLOCK
+ bool
+
+config ARCH_INLINE_READ_LOCK
+ bool
+
+config ARCH_INLINE_READ_LOCK_BH
+ bool
+
+config ARCH_INLINE_READ_LOCK_IRQ
+ bool
+
+config ARCH_INLINE_READ_LOCK_IRQSAVE
+ bool
+
+config ARCH_INLINE_READ_UNLOCK
+ bool
+
+config ARCH_INLINE_READ_UNLOCK_BH
+ bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQ
+ bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+ bool
+
+
+config ARCH_INLINE_WRITE_TRYLOCK
+ bool
+
+config ARCH_INLINE_WRITE_LOCK
+ bool
+
+config ARCH_INLINE_WRITE_LOCK_BH
+ bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQ
+ bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQSAVE
+ bool
+
+config ARCH_INLINE_WRITE_UNLOCK
+ bool
+
+config ARCH_INLINE_WRITE_UNLOCK_BH
+ bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQ
+ bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ bool
+
+#
+# lock_* functions are inlined when:
+# - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
+#
+# trylock_* functions are inlined when:
+# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+# unlock and unlock_irq functions are inlined when:
+# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+# or
+# - DEBUG_SPINLOCK=n and PREEMPT=n
+#
+# unlock_bh and unlock_irqrestore functions are inlined when:
+# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+
+config INLINE_SPIN_TRYLOCK
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
+
+config INLINE_SPIN_TRYLOCK_BH
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
+
+config INLINE_SPIN_LOCK
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+
+config INLINE_SPIN_LOCK_BH
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_SPIN_LOCK_BH
+
+config INLINE_SPIN_LOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_SPIN_LOCK_IRQ
+
+config INLINE_SPIN_LOCK_IRQSAVE
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_SPIN_LOCK_IRQSAVE
+
+config INLINE_SPIN_UNLOCK
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+
+config INLINE_SPIN_UNLOCK_BH
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
+
+config INLINE_SPIN_UNLOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
+
+config INLINE_SPIN_UNLOCK_IRQRESTORE
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+
+
+config INLINE_READ_TRYLOCK
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
+
+config INLINE_READ_LOCK
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+
+config INLINE_READ_LOCK_BH
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_READ_LOCK_BH
+
+config INLINE_READ_LOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_READ_LOCK_IRQ
+
+config INLINE_READ_LOCK_IRQSAVE
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_READ_LOCK_IRQSAVE
+
+config INLINE_READ_UNLOCK
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
+
+config INLINE_READ_UNLOCK_BH
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
+
+config INLINE_READ_UNLOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
+
+config INLINE_READ_UNLOCK_IRQRESTORE
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+
+
+config INLINE_WRITE_TRYLOCK
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
+
+config INLINE_WRITE_LOCK
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+
+config INLINE_WRITE_LOCK_BH
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_WRITE_LOCK_BH
+
+config INLINE_WRITE_LOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_WRITE_LOCK_IRQ
+
+config INLINE_WRITE_LOCK_IRQSAVE
+ def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+ ARCH_INLINE_WRITE_LOCK_IRQSAVE
+
+config INLINE_WRITE_UNLOCK
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
+
+config INLINE_WRITE_UNLOCK_BH
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
+
+config INLINE_WRITE_UNLOCK_IRQ
+ def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
+
+config INLINE_WRITE_UNLOCK_IRQRESTORE
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+config MUTEX_SPIN_ON_OWNER
+ def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
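
Note on the file above: the ARCH_INLINE_* symbols are selected by architectures, and the derived INLINE_* symbols encode the rules from the comment block. The spinlock code (kernel/spinlock.c and its headers, also touched in this series per the diffstat) then presumably keys off each CONFIG_INLINE_* symbol to decide between a static inline fast path and an out-of-line function. A minimal userspace sketch of that toggle, with hypothetical names (my_lock, CONFIG_INLINE_MY_LOCK), not the kernel's actual macros:

#include <stdio.h>

/* Pretend this came from the generated config header; flip it to compare. */
#define CONFIG_INLINE_MY_LOCK 1

struct my_lock { int locked; };

/* The "real" lock body, shared by both variants. */
static inline void __my_lock(struct my_lock *l)
{
	l->locked = 1;
}

#ifdef CONFIG_INLINE_MY_LOCK
/* Inline variant: callers get the body directly, no extra function call. */
#define my_lock(l) __my_lock(l)
#else
/* Out-of-line variant: one shared copy, smaller code at the call sites. */
static void my_lock(struct my_lock *l)
{
	__my_lock(l);
}
#endif

int main(void)
{
	struct my_lock l = { 0 };
	int inlined = 0;

#ifdef CONFIG_INLINE_MY_LOCK
	inlined = 1;
#endif
	my_lock(&l);
	printf("locked=%d inlined=%d\n", l.locked, inlined);
	return 0;
}

The def_bool expressions (e.g. !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK) mean the inline variant is only ever chosen when neither debugging nor lock-break support needs the out-of-line body.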
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..dcf6789bf547 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_TINY_RCU) += rcutiny.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -94,6 +95,7 @@ obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e17041963f5..7f876e60521f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set);
EXPORT_SYMBOL(__cap_full_set);
EXPORT_SYMBOL(__cap_init_eff_set);
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
int file_caps_enabled = 1;
static int __init file_caps_disable(char *str)
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str)
return 1;
}
__setup("no_file_caps", file_caps_disable);
-#endif
/*
* More recent versions of libcap are available from:
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
kernel_cap_t pE, pI, pP;
ret = cap_validate_magic(header, &tocopy);
- if (ret != 0)
- return ret;
+ if ((dataptr == NULL) || (ret != 0))
+ return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret;
if (get_user(pid, &header->pid))
return -EFAULT;
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
{
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
- unsigned i, tocopy;
+ unsigned i, tocopy, copybytes;
kernel_cap_t inheritable, permitted, effective;
struct cred *new;
int ret;
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
if (pid != 0 && pid != task_pid_vnr(current))
return -EPERM;
- if (copy_from_user(&kdata, data,
- tocopy * sizeof(struct __user_cap_data_struct)))
+ copybytes = tocopy * sizeof(struct __user_cap_data_struct);
+ if (copybytes > sizeof(kdata))
+ return -EFAULT;
+
+ if (copy_from_user(&kdata, data, copybytes))
return -EFAULT;
for (i = 0; i < tocopy; i++) {
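
The capset() hunk above bounds the user-controlled copy length (tocopy elements) against the size of the on-stack kdata buffer before copying. A hedged userspace sketch of the same guard, with memcpy standing in for copy_from_user() and invented sizes:

#include <stdio.h>
#include <string.h>

struct cap_u32s { unsigned int effective, permitted, inheritable; };

#define KERNEL_U32S 2   /* how many elements this "kernel" buffer holds */

/* Returns 0 on success, -1 if the request would overrun kdata. */
static int copy_caps(struct cap_u32s *kdata, size_t kdata_sz,
		     const struct cap_u32s *user, unsigned int tocopy)
{
	size_t copybytes = (size_t)tocopy * sizeof(*user);

	if (copybytes > kdata_sz)
		return -1;              /* would overrun the kernel buffer */

	memcpy(kdata, user, copybytes); /* stands in for copy_from_user() */
	return 0;
}

int main(void)
{
	struct cap_u32s kdata[KERNEL_U32S];
	struct cap_u32s user[4] = {
		{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 }
	};

	printf("tocopy=2 -> %d\n", copy_caps(kdata, sizeof(kdata), user, 2)); /* ok */
	printf("tocopy=4 -> %d\n", copy_caps(kdata, sizeof(kdata), user, 4)); /* rejected */
	return 0;
}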
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
return -EFAULT;
buffer[nbytes] = 0; /* nul-terminate */
- strstrip(buffer);
if (cft->write_u64) {
- u64 val = simple_strtoull(buffer, &end, 0);
+ u64 val = simple_strtoull(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
retval = cft->write_u64(cgrp, cft, val);
} else {
- s64 val = simple_strtoll(buffer, &end, 0);
+ s64 val = simple_strtoll(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
}
buffer[nbytes] = 0; /* nul-terminate */
- strstrip(buffer);
- retval = cft->write_string(cgrp, cft, buffer);
+ retval = cft->write_string(cgrp, cft, strstrip(buffer));
if (!retval)
retval = nbytes;
out:
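
The cgroup hunks work because strstrip() trims trailing whitespace in place but returns a pointer past any leading whitespace, so the return value, not the original buffer pointer, must be handed to the parser. A small userspace sketch of a strstrip-like helper (assumed behaviour, not the kernel's lib/string.c source):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Trim trailing whitespace in place; return pointer past leading whitespace. */
static char *my_strstrip(char *s)
{
	size_t len = strlen(s);

	while (len && isspace((unsigned char)s[len - 1]))
		s[--len] = '\0';
	while (*s && isspace((unsigned char)*s))
		s++;
	return s;
}

int main(void)
{
	char buf[] = "   42  \n";

	/* Parsing buf directly would still see the leading spaces;
	 * parse the returned pointer instead, as the hunk does. */
	printf("stripped: \"%s\"\n", my_strstrip(buf));
	return 0;
}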
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
- if (task_session(curr) != pid) {
+ if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
- proc_sid_connector(curr);
- }
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c20fff8c13a..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void)
int cpu;
int total = 0;
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
*/
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
- return (key1->both.word == key2->both.word
+ return (key1 && key2
+ && key1->both.word == key2->both.word
&& key1->both.ptr == key2->both.ptr
&& key1->both.offset == key2->both.offset);
}
@@ -1028,7 +1029,6 @@ static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
struct futex_hash_bucket *hb)
{
- drop_futex_key_refs(&q->key);
get_futex_key_refs(key);
q->key = *key;
@@ -1226,6 +1226,7 @@ retry_private:
*/
if (ret == 1) {
WARN_ON(pi_state);
+ drop_count++;
task_count++;
ret = get_futex_value_locked(&curval2, uaddr2);
if (!ret)
@@ -1304,6 +1305,7 @@ retry_private:
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
current->timer_slack_ns);
}
+retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
goto out_put_key;
/*
- * We expect signal_pending(current), but another thread may
- * have handled it for us already.
+ * We expect signal_pending(current), but we might be the
+ * victim of a spurious wakeup as well.
*/
+ if (!signal_pending(current)) {
+ put_futex_key(fshared, &q.key);
+ goto retry;
+ }
+
ret = -ERESTARTSYS;
if (!abs_time)
goto out_put_key;
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
*/
plist_del(&q->list, &q->list.plist);
+ /* Handle spurious wakeups gracefully */
+ ret = -EWOULDBLOCK;
if (timeout && !timeout->task)
ret = -ETIMEDOUT;
- else
+ else if (signal_pending(current))
ret = -ERESTARTNOINTR;
}
return ret;
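
Both futex_wait() hunks above handle spurious wakeups by re-checking the condition instead of assuming that waking up means progress was made. The classic userspace analogue is re-testing the predicate in a loop around pthread_cond_wait(); a minimal sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	ready = 1;                      /* the actual condition */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (!ready)                  /* loop: a wakeup alone proves nothing */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("woken with ready=%d\n", ready);
	return 0;
}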
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d4e841747400..0c642d51aac2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
rcu_read_lock();
do_each_thread(g, t) {
- if (!--max_count)
+ if (!max_count--)
goto unlock;
if (!--batch_count) {
batch_count = HUNG_TASK_BATCHING;
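
The hung_task fix swaps !--max_count for !max_count--. In plain C terms, the pre-decrement form spends the budget before checking it, so it stops one item early (and wraps badly if the limit starts at zero for an unsigned type); the post-decrement form checks the budget before spending it. A small demonstration of just that difference:

#include <stdio.h>

/* Count how many items each budget-check style lets through. */
static unsigned long run(unsigned long budget, int post_decrement)
{
	unsigned long processed = 0;
	int i;

	for (i = 0; i < 10; i++) {
		if (post_decrement) {
			if (!budget--)          /* check, then spend */
				break;
		} else {
			if (!--budget)          /* spend, then check */
				break;
		}
		processed++;
	}
	return processed;
}

int main(void)
{
	printf("budget 3, !--n : %lu items\n", run(3, 0)); /* prints 2 */
	printf("budget 3, !n-- : %lu items\n", run(3, 1)); /* prints 3 */
	return 0;
}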
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 8996b98f9eb2..22b0a6eedf24 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_spurious_irqs(unsigned long dummy)
if (!(status & IRQ_SPURIOUS_DISABLED))
continue;
+ local_irq_disable();
try_one_irq(i, desc);
+ local_irq_enable();
}
mod_timer(&poll_spurious_irq_timer,
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 9fcb53a11f87..25b103190364 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...)
#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
static int kmod_loop_msg;
- ret = security_kernel_module_request();
- if (ret)
- return ret;
-
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
if (ret >= MODULE_NAME_LEN)
return -ENAMETOOLONG;
+ ret = security_kernel_module_request(module_name);
+ if (ret)
+ return ret;
+
/* If modprobe needs a service that is in a module, we get a recursive
* loop. Limit the number of running kmod threads to max_threads/2 or
* MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
EXPORT_SYMBOL(kthread_create);
/**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
-void kthread_bind(struct task_struct *k, unsigned int cpu)
-{
- /* Must have done schedule() in kthread() before we set_task_cpu */
- if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
- WARN_ON(1);
- return;
- }
- set_task_cpu(k, cpu);
- k->cpus_allowed = cpumask_of_cpu(cpu);
- k->rt.nr_cpus_allowed = 1;
- k->flags |= PF_THREAD_BOUND;
-}
-EXPORT_SYMBOL(kthread_bind);
-
-/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
*
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
- if (sechdrs[i].sh_flags & SHF_ALLOC)
+ if (sechdrs[i].sh_flags & SHF_ALLOC
+ && sechdrs[i].sh_size)
nloaded++;
size[0] = ALIGN(sizeof(*sect_attrs)
+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
for (i = 0; i < nsect; i++) {
if (! (sechdrs[i].sh_flags & SHF_ALLOC))
continue;
+ if (!sechdrs[i].sh_size)
+ continue;
sattr->address = sechdrs[i].sh_addr;
sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
GFP_KERNEL);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
- !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* Optimistic spinning.
*
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
return -ENOSPC;
}
- if (kp->flags & KPARAM_KMALLOCED)
- kfree(*(char **)kp->arg);
-
/* This is a hack. We can't need to strdup in early boot, and we
* don't need to; this mangled commandline is preserved. */
if (slab_is_available()) {
- kp->flags |= KPARAM_KMALLOCED;
*(char **)kp->arg = kstrdup(val, GFP_KERNEL);
- if (!kp->arg)
+ if (!*(char **)kp->arg)
return -ENOMEM;
} else
*(const char **)kp->arg = val;
@@ -304,6 +300,7 @@ static int param_array(const char *name,
unsigned int min, unsigned int max,
void *elem, int elemsize,
int (*set)(const char *, struct kernel_param *kp),
+ u16 flags,
unsigned int *num)
{
int ret;
@@ -313,6 +310,7 @@ static int param_array(const char *name,
/* Get the name right for errors. */
kp.name = name;
kp.arg = elem;
+ kp.flags = flags;
/* No equals sign? */
if (!val) {
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
unsigned int temp_num;
return param_array(kp->name, val, 1, arr->max, arr->elem,
- arr->elemsize, arr->set, arr->num ?: &temp_num);
+ arr->elemsize, arr->set, kp->flags,
+ arr->num ?: &temp_num);
}
int param_array_get(char *buffer, struct kernel_param *kp)
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
void destroy_params(const struct kernel_param *params, unsigned num)
{
- unsigned int i;
-
- for (i = 0; i < num; i++)
- if (params[i].flags & KPARAM_KMALLOCED)
- kfree(*(char **)params[i].arg);
+ /* FIXME: This should free kmalloced charp parameters. It doesn't. */
}
static void __init kernel_add_sysfs_param(const char *name,
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
u64 interrupts, freq;
spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->group_list, group_entry) {
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
regs = task_pt_regs(current);
if (regs) {
- if (perf_event_overflow(event, 0, &data, regs))
- ret = HRTIMER_NORESTART;
+ if (!(event->attr.exclude_idle && current->pid == 0))
+ if (perf_event_overflow(event, 0, &data, regs))
+ ret = HRTIMER_NORESTART;
}
period = max_t(u64, 10000, event->hw.sample_period);
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
return ret;
}
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
+ if (hwc->sample_period) {
+ u64 period;
+
+ if (hwc->remaining) {
+ if (hwc->remaining < 0)
+ period = 10000;
+ else
+ period = hwc->remaining;
+ hwc->remaining = 0;
+ } else {
+ period = max_t(u64, 10000, hwc->sample_period);
+ }
+ __hrtimer_start_range_ns(&hwc->hrtimer,
+ ns_to_ktime(period), 0,
+ HRTIMER_MODE_REL, 0);
+ }
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->sample_period) {
+ ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+ hwc->remaining = ktime_to_ns(remaining);
+
+ hrtimer_cancel(&hwc->hrtimer);
+ }
+}
+
/*
* Software event: cpu wall time clock
*/
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
int cpu = raw_smp_processor_id();
atomic64_set(&hwc->prev_count, cpu_clock(cpu));
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
- if (hwc->sample_period) {
- u64 period = max_t(u64, 10000, hwc->sample_period);
- __hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(period), 0,
- HRTIMER_MODE_REL, 0);
- }
+ perf_swevent_start_hrtimer(event);
return 0;
}
static void cpu_clock_perf_event_disable(struct perf_event *event)
{
- if (event->hw.sample_period)
- hrtimer_cancel(&event->hw.hrtimer);
+ perf_swevent_cancel_hrtimer(event);
cpu_clock_perf_event_update(event);
}
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
now = event->ctx->time;
atomic64_set(&hwc->prev_count, now);
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
- if (hwc->sample_period) {
- u64 period = max_t(u64, 10000, hwc->sample_period);
- __hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(period), 0,
- HRTIMER_MODE_REL, 0);
- }
+
+ perf_swevent_start_hrtimer(event);
return 0;
}
static void task_clock_perf_event_disable(struct perf_event *event)
{
- if (event->hw.sample_period)
- hrtimer_cancel(&event->hw.hrtimer);
+ perf_swevent_cancel_hrtimer(event);
task_clock_perf_event_update(event, event->ctx->time);
}
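
perf_swevent_start_hrtimer()/perf_swevent_cancel_hrtimer() above factor out the duplicated hrtimer setup and, on cancel, stash the time left in hwc->remaining so a re-enabled event resumes its sampling period instead of restarting it. A rough userspace sketch of that "pause and resume the remaining interval" idea, using an invented struct and CLOCK_MONOTONIC instead of an hrtimer:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sw_timer {
	int64_t period_ns;      /* nominal period */
	int64_t remaining_ns;   /* leftover stored by a cancel, 0 = none */
	int64_t deadline_ns;    /* absolute expiry while armed */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void timer_start(struct sw_timer *t)
{
	int64_t period;

	if (t->remaining_ns != 0) {
		/* Resume: use the leftover, with a small floor if it already expired. */
		period = t->remaining_ns > 0 ? t->remaining_ns : 10000;
		t->remaining_ns = 0;
	} else {
		period = t->period_ns;
	}
	t->deadline_ns = now_ns() + period;
}

static void timer_cancel(struct sw_timer *t)
{
	/* Stash whatever is left so the next start resumes instead of restarting. */
	t->remaining_ns = t->deadline_ns - now_ns();
}

int main(void)
{
	struct sw_timer t = { .period_ns = 5000000 }; /* 5 ms */

	timer_start(&t);
	timer_cancel(&t);
	printf("remaining after cancel: %lld ns\n", (long long)t.remaining_ns);
	timer_start(&t);        /* re-armed with the leftover, not a fresh 5 ms */
	printf("new deadline in %lld ns\n", (long long)(t.deadline_ns - now_ns()));
	return 0;
}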
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
+ swsusp_close(FMODE_READ);
goto Unlock;
}
pm_prepare_console();
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
if (error)
- goto Finish;
+ goto close_finish;
error = usermodehelper_disable();
if (error)
- goto Finish;
+ goto close_finish;
error = create_basic_memory_bitmaps();
if (error)
- goto Finish;
+ goto close_finish;
pr_debug("PM: Preparing processes for restore.\n");
error = prepare_processes();
@@ -719,6 +720,7 @@ static int software_resume(void)
pr_debug("PM: Reading hibernation image.\n");
error = swsusp_read(&flags);
+ swsusp_close(FMODE_READ);
if (!error)
hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -737,6 +739,9 @@ static int software_resume(void)
mutex_unlock(&pm_mutex);
pr_debug("PM: Resume from disk failed.\n");
return error;
+close_finish:
+ swsusp_close(FMODE_READ);
+ goto Finish;
}
late_initcall(software_resume);
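
The software_resume() changes route every early-error path through a single close_finish label so swsusp_close() can never be skipped. That centralised-unwind-with-goto style is idiomatic kernel C; a small userspace sketch of the pattern, with invented resource names:

#include <stdio.h>
#include <stdlib.h>

/* Each step may fail; every failure after the open must still close the file. */
static int do_resume(const char *path)
{
	int err = 0;
	FILE *img = fopen(path, "r");
	char *buf = NULL;

	if (!img)
		return -1;

	buf = malloc(4096);
	if (!buf) {
		err = -1;
		goto close_finish;      /* still must release img */
	}

	if (fread(buf, 1, 4096, img) == 0) {
		err = -1;
		goto free_finish;
	}

	printf("image read\n");

free_finish:
	free(buf);
close_finish:
	fclose(img);
	return err;
}

int main(void)
{
	return do_resume("/nonexistent-image") ? 1 : 0;
}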
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle,
{
unsigned int m;
int ret;
- int error = 0;
int nr_pages;
int err2;
struct bio *bio;
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle,
nr_pages = 0;
bio = NULL;
do_gettimeofday(&start);
- do {
+ while (1) {
ret = snapshot_read_next(snapshot, PAGE_SIZE);
- if (ret > 0) {
- error = swap_write_page(handle, data_of(*snapshot),
- &bio);
- if (error)
- break;
- if (!(nr_pages % m))
- printk("\b\b\b\b%3d%%", nr_pages / m);
- nr_pages++;
- }
- } while (ret > 0);
+ if (ret <= 0)
+ break;
+ ret = swap_write_page(handle, data_of(*snapshot), &bio);
+ if (ret)
+ break;
+ if (!(nr_pages % m))
+ printk("\b\b\b\b%3d%%", nr_pages / m);
+ nr_pages++;
+ }
err2 = wait_on_bio_chain(&bio);
do_gettimeofday(&stop);
- if (!error)
- error = err2;
- if (!error)
+ if (!ret)
+ ret = err2;
+ if (!ret)
printk("\b\b\b\bdone\n");
+ else
+ printk("\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
- return error;
+ return ret;
}
/**
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle,
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
error = -ENODATA;
- }
+ } else
+ printk("\n");
swsusp_show_speed(&start, &stop, nr_to_read, "Read");
return error;
}
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p)
error = load_image(&handle, &snapshot, header->pages - 1);
release_swap_reader(&handle);
- blkdev_put(resume_bdev, FMODE_READ);
-
if (!error)
pr_debug("PM: Image successfully loaded\n");
else
@@ -596,7 +595,7 @@ int swsusp_check(void)
error = bio_read_page(swsusp_resume_block,
swsusp_header, NULL);
if (error)
- return error;
+ goto put;
if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
@@ -604,8 +603,10 @@ int swsusp_check(void)
error = bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
- return -EINVAL;
+ error = -EINVAL;
}
+
+put:
if (error)
blkdev_put(resume_bdev, FMODE_READ);
else
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..b5ac4d99c667 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/kexec.h>
+#include <linux/ratelimit.h>
#include <asm/uaccess.h>
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles);
*/
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
-int printk_ratelimit(void)
+int __printk_ratelimit(const char *func)
{
- return __ratelimit(&printk_ratelimit_state);
+ return ___ratelimit(&printk_ratelimit_state, func);
}
-EXPORT_SYMBOL(printk_ratelimit);
+EXPORT_SYMBOL(__printk_ratelimit);
/**
* printk_timed_ratelimit - caller-controlled printk ratelimiting
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 400183346ad2..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/kernel_stat.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
-int rcu_scheduler_active __read_mostly;
-
/*
* Awaken the corresponding synchronize_rcu() instance now that a
* grace period has elapsed.
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
rcu = container_of(head, struct rcu_synchronize, head);
complete(&rcu->completion);
}
-
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
- struct rcu_synchronize rcu;
-
- if (!rcu_scheduler_active)
- return;
-
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed. These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-void synchronize_sched(void)
-{
- struct rcu_synchronize rcu;
-
- if (rcu_blocking_is_gp())
- return;
-
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_sched(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- */
-void synchronize_rcu_bh(void)
-{
- struct rcu_synchronize rcu;
-
- if (rcu_blocking_is_gp())
- return;
-
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_bh(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- return rcu_cpu_notify(self, action, hcpu);
-}
-
-void __init rcu_init(void)
-{
- int i;
-
- __rcu_init();
- cpu_notifier(rcu_barrier_cpu_hotplug, 0);
-
- /*
- * We don't need protection against CPU-hotplug here because
- * this is called early in boot, before either interrupts
- * or the scheduler are operational.
- */
- for_each_online_cpu(i)
- rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
-}
-
-void rcu_scheduler_starting(void)
-{
- WARN_ON(num_online_cpus() != 1);
- WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
-}
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
new file mode 100644
index 000000000000..9f6d9ff2572c
--- /dev/null
+++ b/kernel/rcutiny.c
@@ -0,0 +1,282 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/cpu.h>
+
+/* Global control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+ struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
+ struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
+ struct rcu_head **curtail; /* ->next pointer of last CB. */
+};
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .donetail = &rcu_ctrlblk.rcucblist,
+ .curtail = &rcu_ctrlblk.rcucblist,
+};
+
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .donetail = &rcu_bh_ctrlblk.rcucblist,
+ .curtail = &rcu_bh_ctrlblk.rcucblist,
+};
+
+#ifdef CONFIG_NO_HZ
+
+static long rcu_dynticks_nesting = 1;
+
+/*
+ * Enter dynticks-idle mode, which is an extended quiescent state
+ * if we have fully entered that mode (i.e., if the new value of
+ * dynticks_nesting is zero).
+ */
+void rcu_enter_nohz(void)
+{
+ if (--rcu_dynticks_nesting == 0)
+ rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+}
+
+/*
+ * Exit dynticks-idle mode, so that we are no longer in an extended
+ * quiescent state.
+ */
+void rcu_exit_nohz(void)
+{
+ rcu_dynticks_nesting++;
+}
+
+#endif /* #ifdef CONFIG_NO_HZ */
+
+/*
+ * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
+ * Also disable irqs to avoid confusion due to interrupt handlers
+ * invoking call_rcu().
+ */
+static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (rcp->rcucblist != NULL &&
+ rcp->donetail != rcp->curtail) {
+ rcp->donetail = rcp->curtail;
+ local_irq_restore(flags);
+ return 1;
+ }
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * Record an rcu quiescent state. And an rcu_bh quiescent state while we
+ * are at it, given that any rcu quiescent state is also an rcu_bh
+ * quiescent state. Use "+" instead of "||" to defeat short circuiting.
+ */
+void rcu_sched_qs(int cpu)
+{
+ if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Record an rcu_bh quiescent state.
+ */
+void rcu_bh_qs(int cpu)
+{
+ if (rcu_qsctr_help(&rcu_bh_ctrlblk))
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Check to see if the scheduling-clock interrupt came from an extended
+ * quiescent state, and, if so, tell RCU about it.
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) &&
+ !in_softirq() &&
+ hardirq_count() <= (1 << HARDIRQ_SHIFT)))
+ rcu_sched_qs(cpu);
+ else if (!in_softirq())
+ rcu_bh_qs(cpu);
+}
+
+/*
+ * Helper function for rcu_process_callbacks() that operates on the
+ * specified rcu_ctrlkblk structure.
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+{
+ struct rcu_head *next, *list;
+ unsigned long flags;
+
+ /* If no RCU callbacks ready to invoke, just return. */
+ if (&rcp->rcucblist == rcp->donetail)
+ return;
+
+ /* Move the ready-to-invoke callbacks to a local list. */
+ local_irq_save(flags);
+ list = rcp->rcucblist;
+ rcp->rcucblist = *rcp->donetail;
+ *rcp->donetail = NULL;
+ if (rcp->curtail == rcp->donetail)
+ rcp->curtail = &rcp->rcucblist;
+ rcp->donetail = &rcp->rcucblist;
+ local_irq_restore(flags);
+
+ /* Invoke the callbacks on the local list. */
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ }
+}
+
+/*
+ * Invoke any callbacks whose grace period has completed.
+ */
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ __rcu_process_callbacks(&rcu_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+}
+
+/*
+ * Wait for a grace period to elapse. But it is illegal to invoke
+ * synchronize_sched() from within an RCU read-side critical section.
+ * Therefore, any legal call to synchronize_sched() is a quiescent
+ * state, and so on a UP system, synchronize_sched() need do nothing.
+ * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
+ * benefits of doing might_sleep() to reduce latency.)
+ *
+ * Cool, huh? (Due to Josh Triplett.)
+ *
+ * But we want to make this a static inline later.
+ */
+void synchronize_sched(void)
+{
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
+void synchronize_rcu_bh(void)
+{
+ synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
+/*
+ * Helper function for call_rcu() and call_rcu_bh().
+ */
+static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_ctrlblk *rcp)
+{
+ unsigned long flags;
+
+ head->func = func;
+ head->next = NULL;
+
+ local_irq_save(flags);
+ *rcp->curtail = head;
+ rcp->curtail = &head->next;
+ local_irq_restore(flags);
+}
+
+/*
+ * Post an RCU callback to be invoked after the end of an RCU grace
+ * period. But since we have but one CPU, that would be after any
+ * quiescent state.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_ctrlblk);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Post an RCU bottom-half callback to be invoked after any subsequent
+ * quiescent state.
+ */
+void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_bh_ctrlblk);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
+void rcu_barrier(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+void rcu_barrier_bh(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_bh(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+void rcu_barrier_sched(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_sched(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
+void __init rcu_init(void)
+{
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
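
The heart of rcutiny above is a single callback list with two tail pointers: call_rcu() appends at *curtail, a quiescent state promotes everything queued so far by setting donetail = curtail, and the softirq splices off the sublist ending at donetail and invokes it. A standalone userspace sketch of that list manipulation, keeping the field names but dropping the locking and softirq machinery:

#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

struct ctrlblk {
	struct cb *rcucblist;   /* all pending callbacks */
	struct cb **donetail;   /* ->next of last callback known to be "done" */
	struct cb **curtail;    /* ->next of last callback on the list */
};

static void cb_enqueue(struct ctrlblk *rcp, struct cb *head, int id)
{
	head->id = id;
	head->next = NULL;
	*rcp->curtail = head;           /* append after the current last element */
	rcp->curtail = &head->next;
}

/* A quiescent state: everything queued so far becomes safe to invoke. */
static void cb_quiescent(struct ctrlblk *rcp)
{
	rcp->donetail = rcp->curtail;
}

/* Splice off and invoke the "done" sublist, as __rcu_process_callbacks() does. */
static void cb_process(struct ctrlblk *rcp)
{
	struct cb *list;

	if (&rcp->rcucblist == rcp->donetail)
		return;                 /* nothing is ready to invoke */

	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;

	while (list) {
		struct cb *next = list->next;

		printf("invoking callback %d\n", list->id);
		list = next;
	}
}

int main(void)
{
	struct ctrlblk rcp = { NULL, &rcp.rcucblist, &rcp.rcucblist };
	struct cb c1, c2, c3;

	cb_enqueue(&rcp, &c1, 1);
	cb_enqueue(&rcp, &c2, 2);
	cb_quiescent(&rcp);             /* callbacks 1 and 2 become "done" */
	cb_enqueue(&rcp, &c3, 3);       /* queued after the quiescent state */
	cb_process(&rcp);               /* invokes 1 and 2, keeps 3 pending */
	cb_process(&rcp);               /* no new quiescent state: does nothing */
	cb_quiescent(&rcp);
	cb_process(&rcp);               /* now invokes 3 */
	return 0;
}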
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 697c0a0229d4..a621a67ef4e3 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p)
cur_ops->deferred_free(rp);
}
+static int rcu_no_completed(void)
+{
+ return 0;
+}
+
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = {
.name = "rcu_sync"
};
+static struct rcu_torture_ops rcu_expedited_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_no_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu_expedited,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_expedited"
+};
+
/*
* Definitions for rcu_bh torture testing.
*/
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = {
.name = "srcu"
};
+static void srcu_torture_synchronize_expedited(void)
+{
+ synchronize_srcu_expedited(&srcu_ctl);
+}
+
+static struct rcu_torture_ops srcu_expedited_ops = {
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .completed = srcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize_expedited,
+ .cb_barrier = NULL,
+ .stats = srcu_torture_stats,
+ .name = "srcu_expedited"
+};
+
/*
* Definitions for sched torture testing.
*/
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx)
preempt_enable();
}
-static int sched_torture_completed(void)
-{
- return 0;
-}
-
static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
+ .completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = sched_torture_synchronize,
.cb_barrier = rcu_barrier_sched,
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = {
.name = "sched"
};
-static struct rcu_torture_ops sched_ops_sync = {
+static struct rcu_torture_ops sched_sync_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
+ .completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = sched_torture_synchronize,
.cb_barrier = NULL,
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
+ .completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_sched_expedited,
.cb_barrier = NULL,
@@ -1097,9 +1131,10 @@ rcu_torture_init(void)
int cpu;
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
- { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
- &sched_expedited_ops,
- &srcu_ops, &sched_ops, &sched_ops_sync, };
+ { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
+ &rcu_bh_ops, &rcu_bh_sync_ops,
+ &srcu_ops, &srcu_expedited_ops,
+ &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
@@ -1110,8 +1145,12 @@ rcu_torture_init(void)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
- printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
+ printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
torture_type);
+ printk(KERN_ALERT "rcu-torture types:");
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
+ printk(KERN_ALERT " %s", torture_ops[i]->name);
+ printk(KERN_ALERT "\n");
mutex_unlock(&fullstop_mutex);
return -EINVAL;
}
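
rcutorture drives every RCU flavour through a struct rcu_torture_ops table of function pointers, so adding rcu_expedited_ops or srcu_expedited_ops is just another entry plus a name matched against the torture_type parameter; the last hunk also prints the valid names on a mismatch. A tiny sketch of that pick-an-ops-table-by-name pattern, with hypothetical ops in plain userspace C:

#include <stdio.h>
#include <string.h>

struct torture_ops {
	const char *name;
	void (*sync)(void);
};

static void sync_fast(void)  { printf("fast sync\n"); }
static void sync_exped(void) { printf("expedited sync\n"); }

static const struct torture_ops ops_table[] = {
	{ "fast",      sync_fast  },
	{ "expedited", sync_exped },
};

static const struct torture_ops *pick_ops(const char *type)
{
	size_t i;

	for (i = 0; i < sizeof(ops_table) / sizeof(ops_table[0]); i++)
		if (strcmp(ops_table[i].name, type) == 0)
			return &ops_table[i];

	/* As in the hunk: list the valid names when the type is unknown. */
	fprintf(stderr, "invalid torture type: \"%s\"; valid:", type);
	for (i = 0; i < sizeof(ops_table) / sizeof(ops_table[0]); i++)
		fprintf(stderr, " %s", ops_table[i].name);
	fprintf(stderr, "\n");
	return NULL;
}

int main(int argc, char **argv)
{
	const struct torture_ops *ops = pick_ops(argc > 1 ? argv[1] : "expedited");

	if (!ops)
		return 1;
	ops->sync();
	return 0;
}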
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,20 +46,24 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
+#include <linux/kernel_stat.h>
#include "rcutree.h"
/* Data structures. */
+static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
+
#define RCU_STATE_INITIALIZER(name) { \
.level = { &name.node[0] }, \
.levelcnt = { \
NUM_RCU_LVL_0, /* root of hierarchy. */ \
NUM_RCU_LVL_1, \
NUM_RCU_LVL_2, \
- NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
+ NUM_RCU_LVL_3, \
+ NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
}, \
- .signaled = RCU_SIGNAL_INIT, \
+ .signaled = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+static int rcu_scheduler_active __read_mostly;
+
/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu)
struct rcu_data *rdp;
rdp = &per_cpu(rcu_sched_data, cpu);
- rdp->passed_quiesc_completed = rdp->completed;
+ rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
rcu_preempt_note_context_switch(cpu);
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu)
struct rcu_data *rdp;
rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc_completed = rdp->completed;
+ rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
}
@@ -335,28 +341,9 @@ void rcu_irq_exit(void)
set_need_resched();
}
-/*
- * Record the specified "completed" value, which is later used to validate
- * dynticks counter manipulations. Specify "rsp->completed - 1" to
- * unconditionally invalidate any future dynticks manipulations (which is
- * useful at the beginning of a grace period).
- */
-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
-{
- rsp->dynticks_completed = comp;
-}
-
#ifdef CONFIG_SMP
/*
- * Recall the previously recorded value of the completion for dynticks.
- */
-static long dyntick_recall_completed(struct rcu_state *rsp)
-{
- return rsp->dynticks_completed;
-}
-
-/*
* Snapshot the specified CPU's dynticks counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
#else /* #ifdef CONFIG_NO_HZ */
-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
-{
-}
-
#ifdef CONFIG_SMP
-/*
- * If there are no dynticks, then the only way that a CPU can passively
- * be in a quiescent state is to be offline. Unlike dynticks idle, which
- * is a point in time during the prior (already finished) grace period,
- * an offline CPU is always in a quiescent state, and thus can be
- * unconditionally applied. So just return the current value of completed.
- */
-static long dyntick_recall_completed(struct rcu_state *rsp)
-{
- return rsp->completed;
-}
-
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
return 0;
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Update CPU-local rcu_data state to record the newly noticed grace period.
* This is used both when we started the grace period and when we notice
- * that someone else started the grace period.
+ * that someone else started the grace period. The caller must hold the
+ * ->lock of the leaf rcu_node structure corresponding to the current CPU,
+ * and must have irqs disabled.
*/
+static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+{
+ if (rdp->gpnum != rnp->gpnum) {
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->gpnum = rnp->gpnum;
+ }
+}
+
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
- rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
- rdp->gpnum = rsp->gpnum;
+ unsigned long flags;
+ struct rcu_node *rnp;
+
+ local_irq_save(flags);
+ rnp = rdp->mynode;
+ if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
+ !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
+ local_irq_restore(flags);
+ return;
+ }
+ __note_new_gpnum(rsp, rnp, rdp);
+ spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended. This may be called only from the CPU to whom the rdp
+ * belongs. In addition, the corresponding leaf rcu_node structure's
+ * ->lock must be held by the caller, with irqs disabled.
+ */
+static void
+__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+{
+ /* Did another grace period end? */
+ if (rdp->completed != rnp->completed) {
+
+ /* Advance callbacks. No harm if list empty. */
+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Remember that we saw this grace-period completion. */
+ rdp->completed = rnp->completed;
+ }
+}
+
+/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended. This may be called only from the CPU to whom the rdp
+ * belongs.
+ */
+static void
+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+ struct rcu_node *rnp;
+
+ local_irq_save(flags);
+ rnp = rdp->mynode;
+ if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
+ !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
+ local_irq_restore(flags);
+ return;
+ }
+ __rcu_process_gp_end(rsp, rnp, rdp);
+ spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Do per-CPU grace-period initialization for running CPU. The caller
+ * must hold the lock of the leaf rcu_node structure corresponding to
+ * this CPU.
+ */
+static void
+rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+{
+ /* Prior grace period ended, so advance callbacks for current CPU. */
+ __rcu_process_gp_end(rsp, rnp, rdp);
+
+ /*
+ * Because this CPU just now started the new grace period, we know
+ * that all of its callbacks will be covered by this upcoming grace
+ * period, even the ones that were registered arbitrarily recently.
+ * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+ *
+ * Other CPUs cannot be sure exactly when the grace period started.
+ * Therefore, their recently registered callbacks must pass through
+ * an additional RCU_NEXT_READY stage, so that they will be handled
+ * by the next RCU grace period.
+ */
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Set state so that this CPU will detect the next quiescent state. */
+ __note_new_gpnum(rsp, rnp, rdp);
+}
+
+/*
* Start a new RCU grace period if warranted, re-initializing the hierarchy
* in preparation for detecting the next grace period. The caller must hold
* the root node's ->lock, which is released before return. Hard irqs must
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
struct rcu_node *rnp = rcu_get_root(rsp);
if (!cpu_needs_another_gp(rsp, rdp)) {
- spin_unlock_irqrestore(&rnp->lock, flags);
+ if (rnp->completed == rsp->completed) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+
+ /*
+ * Propagate new ->completed value to rcu_node structures
+ * so that other CPUs don't have to wait until the start
+ * of the next grace period to process their callbacks.
+ */
+ rcu_for_each_node_breadth_first(rsp, rnp) {
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->completed = rsp->completed;
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ }
+ local_irq_restore(flags);
return;
}
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
record_gp_stall_check_time(rsp);
- dyntick_record_completed(rsp, rsp->completed - 1);
- note_new_gpnum(rsp, rdp);
-
- /*
- * Because this CPU just now started the new grace period, we know
- * that all of its callbacks will be covered by this upcoming grace
- * period, even the ones that were registered arbitrarily recently.
- * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
- *
- * Other CPUs cannot be sure exactly when the grace period started.
- * Therefore, their recently registered callbacks must pass through
- * an additional RCU_NEXT_READY stage, so that they will be handled
- * by the next RCU grace period.
- */
- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
/* Special-case the common single-level case. */
if (NUM_RCU_NODES == 1) {
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
+ rnp->completed = rsp->completed;
rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
+ rcu_start_gp_per_cpu(rsp, rnp, rdp);
spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -657,69 +723,50 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
* irqs disabled.
*/
rcu_for_each_node_breadth_first(rsp, rnp) {
- spin_lock(&rnp->lock); /* irqs already disabled. */
+ spin_lock(&rnp->lock); /* irqs already disabled. */
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
- spin_unlock(&rnp->lock); /* irqs already disabled. */
+ rnp->completed = rsp->completed;
+ if (rnp == rdp->mynode)
+ rcu_start_gp_per_cpu(rsp, rnp, rdp);
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
+ rnp = rcu_get_root(rsp);
+ spin_lock(&rnp->lock); /* irqs already disabled. */
rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
spin_unlock_irqrestore(&rsp->onofflock, flags);
}
/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended. This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static void
-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
-{
- long completed_snap;
- unsigned long flags;
-
- local_irq_save(flags);
- completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
-
- /* Did another grace period end? */
- if (rdp->completed != completed_snap) {
-
- /* Advance callbacks. No harm if list empty. */
- rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-
- /* Remember that we saw this grace-period completion. */
- rdp->completed = completed_snap;
- }
- local_irq_restore(flags);
-}
-
-/*
- * Clean up after the prior grace period and let rcu_start_gp() start up
- * the next grace period if one is needed. Note that the caller must
- * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ * Report a full set of quiescent states to the specified rcu_state
+ * data structure. This involves cleaning up after the prior grace
+ * period and letting rcu_start_gp() start up the next grace period
+ * if one is needed. Note that the caller must hold rnp->lock, as
+ * required by rcu_start_gp(), which will release it.
*/
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
rsp->completed = rsp->gpnum;
- rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+ rsp->signaled = RCU_GP_IDLE;
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}
/*
- * Similar to cpu_quiet(), for which it is a helper function. Allows
- * a group of CPUs to be quieted at one go, though all the CPUs in the
- * group must be represented by the same leaf rcu_node structure.
- * That structure's lock must be held upon entry, and it is released
- * before return.
+ * Similar to rcu_report_qs_rdp(), for which it is a helper function.
+ * Allows quiescent states for a group of CPUs to be reported at one go
+ * to the specified rcu_node structure, though all the CPUs in the group
+ * must be represented by the same rcu_node structure (which need not be
+ * a leaf rcu_node structure, though it often will be). That structure's
+ * lock must be held upon entry, and it is released before return.
*/
static void
-cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
- unsigned long flags)
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
struct rcu_node *rnp_c;
@@ -755,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
/*
* Get here if we are the last CPU to pass through a quiescent
- * state for this grace period. Invoke cpu_quiet_msk_finish()
+ * state for this grace period. Invoke rcu_report_qs_rsp()
* to clean up and start the next grace period if one is needed.
*/
- cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
+ rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}
/*
- * Record a quiescent state for the specified CPU, which must either be
- * the current CPU. The lastcomp argument is used to make sure we are
- * still in the grace period of interest. We don't want to end the current
- * grace period based on quiescent states detected in an earlier grace
- * period!
+ * Record a quiescent state for the specified CPU to that CPU's rcu_data
+ * structure. This must be either called from the specified CPU, or
+ * called when the specified CPU is known to be offline (and when it is
+ * also known that no other CPU is concurrently trying to help the offline
+ * CPU). The lastcomp argument is used to make sure we are still in the
+ * grace period of interest. We don't want to end the current grace period
+ * based on quiescent states detected in an earlier grace period!
*/
static void
-cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
{
unsigned long flags;
unsigned long mask;
@@ -777,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
rnp = rdp->mynode;
spin_lock_irqsave(&rnp->lock, flags);
- if (lastcomp != ACCESS_ONCE(rsp->completed)) {
+ if (lastcomp != rnp->completed) {
/*
* Someone beat us to it for this grace period, so leave.
* The race with GP start is resolved by the fact that we
* hold the leaf rcu_node lock, so that the per-CPU bits
* cannot yet be initialized -- so we would simply find our
- * CPU's bit already cleared in cpu_quiet_msk() if this race
- * occurred.
+ * CPU's bit already cleared in rcu_report_qs_rnp() if this
+ * race occurred.
*/
rdp->passed_quiesc = 0; /* try again later! */
spin_unlock_irqrestore(&rnp->lock, flags);
@@ -803,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
*/
rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
- cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
+ rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
}
}
@@ -834,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
if (!rdp->passed_quiesc)
return;
- /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
- cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+ /*
+ * Tell RCU we are done (but rcu_report_qs_rdp() will be the
+ * judge of that).
+ */
+ rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -895,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
- long lastcomp;
unsigned long mask;
+ int need_report = 0;
struct rcu_data *rdp = rsp->rda[cpu];
struct rcu_node *rnp;
@@ -910,17 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit &= ~mask;
if (rnp->qsmaskinit != 0) {
- spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ if (rnp != rdp->mynode)
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
break;
}
- rcu_preempt_offline_tasks(rsp, rnp, rdp);
+ if (rnp == rdp->mynode)
+ need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
+ else
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
mask = rnp->grpmask;
- spin_unlock(&rnp->lock); /* irqs remain disabled. */
rnp = rnp->parent;
} while (rnp != NULL);
- lastcomp = rsp->completed;
- spin_unlock_irqrestore(&rsp->onofflock, flags);
+ /*
+ * We still hold the leaf rcu_node structure lock here, and
+ * irqs are still disabled. The reason for this subterfuge is that
+ * invoking rcu_report_unblock_qs_rnp() with ->onofflock held leads
+ * to deadlock.
+ */
+ spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+ rnp = rdp->mynode;
+ if (need_report & RCU_OFL_TASKS_NORM_GP)
+ rcu_report_unblock_qs_rnp(rnp, flags);
+ else
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ if (need_report & RCU_OFL_TASKS_EXP_GP)
+ rcu_report_exp_rnp(rsp, rnp);
rcu_adopt_orphan_cbs(rsp);
}
@@ -958,7 +1025,7 @@ static void rcu_offline_cpu(int cpu)
* Invoke any RCU callbacks that have made it to the end of their grace
* period. Throttle as specified by rdp->blimit.
*/
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *next, *list, **tail;
@@ -1011,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
rdp->blimit = blimit;
+ /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+ if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+ rdp->qlen_last_fqs_check = 0;
+ rdp->n_force_qs_snap = rsp->n_force_qs;
+ } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+ rdp->qlen_last_fqs_check = rdp->qlen;
+
local_irq_restore(flags);
/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1085,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
rcu_for_each_leaf_node(rsp, rnp) {
mask = 0;
spin_lock_irqsave(&rnp->lock, flags);
- if (rsp->completed != lastcomp) {
+ if (rnp->completed != lastcomp) {
spin_unlock_irqrestore(&rnp->lock, flags);
return 1;
}
@@ -1099,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
mask |= bit;
}
- if (mask != 0 && rsp->completed == lastcomp) {
+ if (mask != 0 && rnp->completed == lastcomp) {
- /* cpu_quiet_msk() releases rnp->lock. */
- cpu_quiet_msk(mask, rsp, rnp, flags);
+ /* rcu_report_qs_rnp() releases rnp->lock. */
+ rcu_report_qs_rnp(mask, rsp, rnp, flags);
continue;
}
spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1120,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
long lastcomp;
struct rcu_node *rnp = rcu_get_root(rsp);
u8 signaled;
+ u8 forcenow;
if (!rcu_gp_in_progress(rsp))
return; /* No grace period in progress, nothing to force. */
@@ -1132,19 +1207,20 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
goto unlock_ret; /* no emergency and done recently. */
rsp->n_force_qs++;
spin_lock(&rnp->lock);
- lastcomp = rsp->completed;
+ lastcomp = rsp->gpnum - 1;
signaled = rsp->signaled;
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
- if (lastcomp == rsp->gpnum) {
+ if (!rcu_gp_in_progress(rsp)) {
rsp->n_force_qs_ngp++;
spin_unlock(&rnp->lock);
goto unlock_ret; /* no GP in progress, time updated. */
}
spin_unlock(&rnp->lock);
switch (signaled) {
+ case RCU_GP_IDLE:
case RCU_GP_INIT:
- break; /* grace period still initializing, ignore. */
+ break; /* grace period idle or initializing, ignore. */
case RCU_SAVE_DYNTICK:
@@ -1155,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
if (rcu_process_dyntick(rsp, lastcomp,
dyntick_save_progress_counter))
goto unlock_ret;
+ /* fall into next case. */
+
+ case RCU_SAVE_COMPLETED:
/* Update state, record completion counter. */
+ forcenow = 0;
spin_lock(&rnp->lock);
- if (lastcomp == rsp->completed) {
+ if (lastcomp + 1 == rsp->gpnum &&
+ lastcomp == rsp->completed &&
+ rsp->signaled == signaled) {
rsp->signaled = RCU_FORCE_QS;
- dyntick_record_completed(rsp, lastcomp);
+ rsp->completed_fqs = lastcomp;
+ forcenow = signaled == RCU_SAVE_COMPLETED;
}
spin_unlock(&rnp->lock);
- break;
+ if (!forcenow)
+ break;
+ /* fall into next case. */
case RCU_FORCE_QS:
/* Check dyntick-idle state, send IPI to laggards. */
- if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
+ if (rcu_process_dyntick(rsp, rsp->completed_fqs,
rcu_implicit_dynticks_qs))
goto unlock_ret;
@@ -1224,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
}
/* If there are callbacks ready, invoke them. */
- rcu_do_batch(rdp);
+ rcu_do_batch(rsp, rdp);
}
/*
@@ -1288,10 +1373,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
}
- /* Force the grace period if too many callbacks or too long waiting. */
- if (unlikely(++rdp->qlen > qhimark)) {
+ /*
+ * Force the grace period if too many callbacks or too long waiting.
+ * Enforce hysteresis, and don't invoke force_quiescent_state()
+ * if some other CPU has recently done so. Also, don't bother
+ * invoking force_quiescent_state() if the newly enqueued callback
+ * is the only one waiting for a grace period to complete.
+ */
+ if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
rdp->blimit = LONG_MAX;
- force_quiescent_state(rsp, 0);
+ if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+ *rdp->nxttail[RCU_DONE_TAIL] != head)
+ force_quiescent_state(rsp, 0);
+ rdp->n_force_qs_snap = rsp->n_force_qs;
+ rdp->qlen_last_fqs_check = rdp->qlen;
} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
force_quiescent_state(rsp, 1);
local_irq_restore(flags);
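For scale, assuming the default tuning of qhimark = 10000 (which this patch does not change): once a CPU's callback queue has grown past the limit, the hysteresis above means force_quiescent_state() is attempted at most once per 10000 additionally queued callbacks, and even then only if no other CPU has forced quiescent states since this CPU's last ->n_force_qs snapshot, rather than on every single enqueue beyond the limit as before.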
@@ -1315,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed. These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns. However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API. In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+ struct rcu_synchronize rcu;
+
+ if (rcu_blocking_is_gp())
+ return;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_sched(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
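As a usage sketch only (hypothetical caller, not part of this patch), an updater whose readers run under preempt_disable() might pair unlinking with synchronize_sched() like this:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_list);		/* traversed under preempt_disable() */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters */

static void foo_remove(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);		/* unlink; readers may still see p */
	spin_unlock(&foo_lock);
	synchronize_sched();		/* all preempt-disabled readers done */
	kfree(p);			/* now safe to free */
}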
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+ struct rcu_synchronize rcu;
+
+ if (rcu_blocking_is_gp())
+ return;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_bh(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
/*
* Check to see if there is any immediate RCU-related work to be done
* by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1324,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
*/
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ struct rcu_node *rnp = rdp->mynode;
+
rdp->n_rcu_pending++;
/* Check for CPU stalls, if enabled. */
@@ -1348,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
}
/* Has another RCU grace period completed? */
- if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */
+ if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
rdp->n_rp_gp_completed++;
return 1;
}
/* Has a new RCU grace period started? */
- if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */
+ if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
rdp->n_rp_gp_started++;
return 1;
}
@@ -1397,6 +1556,21 @@ int rcu_needs_cpu(int cpu)
rcu_preempt_needs_cpu(cpu);
}
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process. Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time this idle
+ * task is booting the system). After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.
+ */
+void rcu_scheduler_starting(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+ WARN_ON(nr_context_switches() > 0);
+ rcu_scheduler_active = 1;
+}
+
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -1508,21 +1682,18 @@ static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
unsigned long flags;
- long lastcomp;
unsigned long mask;
struct rcu_data *rdp = rsp->rda[cpu];
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
spin_lock_irqsave(&rnp->lock, flags);
- lastcomp = rsp->completed;
- rdp->completed = lastcomp;
- rdp->gpnum = lastcomp;
rdp->passed_quiesc = 0; /* We could be racing with new GP, */
rdp->qs_pending = 1; /* so set up to respond to current GP. */
rdp->beenonline = 1; /* We have now been online. */
rdp->preemptable = preemptable;
- rdp->passed_quiesc_completed = lastcomp - 1;
+ rdp->qlen_last_fqs_check = 0;
+ rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -1542,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit |= mask;
mask = rnp->grpmask;
+ if (rnp == rdp->mynode) {
+ rdp->gpnum = rnp->completed; /* if GP in progress... */
+ rdp->completed = rnp->completed;
+ rdp->passed_quiesc_completed = rnp->completed - 1;
+ }
spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
} while (rnp != NULL && !(rnp->qsmaskinit & mask));
@@ -1559,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
/*
* Handle CPU online/offline notification events.
*/
-int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1647,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
cpustride *= rsp->levelspread[i];
rnp = rsp->level[i];
for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
- if (rnp != rcu_get_root(rsp))
- spin_lock_init(&rnp->lock);
+ spin_lock_init(&rnp->lock);
+ lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
rnp->gpnum = 0;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
@@ -1669,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
rnp->level = i;
INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
+ INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
+ INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
}
}
- spin_lock_init(&rcu_get_root(rsp)->lock);
}
/*
@@ -1697,16 +1874,30 @@ do { \
} \
} while (0)
-void __init __rcu_init(void)
+void __init rcu_init(void)
{
+ int i;
+
rcu_bootup_announce();
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#if NUM_RCU_LVL_4 != 0
+ printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
+#endif /* #if NUM_RCU_LVL_4 != 0 */
RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+
+ /*
+ * We don't need protection against CPU-hotplug here because
+ * this is called early in boot, before either interrupts
+ * or the scheduler are operational.
+ */
+ cpu_notifier(rcu_cpu_notify, 0);
+ for_each_online_cpu(i)
+ rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
}
#include "rcutree_plugin.h"
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b40ac5706040..d2a0046f63b2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -34,10 +34,11 @@
* In practice, this has not been tested, so there is probably some
* bug somewhere.
*/
-#define MAX_RCU_LVLS 3
+#define MAX_RCU_LVLS 4
#define RCU_FANOUT (CONFIG_RCU_FANOUT)
#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
+#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)
#if NR_CPUS <= RCU_FANOUT
# define NUM_RCU_LVLS 1
@@ -45,23 +46,33 @@
# define NUM_RCU_LVL_1 (NR_CPUS)
# define NUM_RCU_LVL_2 0
# define NUM_RCU_LVL_3 0
+# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_SQ
# define NUM_RCU_LVLS 2
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_2 (NR_CPUS)
# define NUM_RCU_LVL_3 0
+# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_CUBE
# define NUM_RCU_LVLS 3
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_3 NR_CPUS
+# define NUM_RCU_LVL_4 0
+#elif NR_CPUS <= RCU_FANOUT_FOURTH
+# define NUM_RCU_LVLS 4
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
+# define NUM_RCU_LVL_4 NR_CPUS
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT */
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
/*
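To make the enlarged geometry concrete, a worked example (illustrative values only; CONFIG_RCU_FANOUT defaults to 64 on 64-bit builds):

/*
 * Example: CONFIG_RCU_FANOUT = 64, NR_CPUS = 4096.
 * NR_CPUS <= RCU_FANOUT_SQ (4096), so the two-level case applies:
 *   NUM_RCU_LVLS  = 2
 *   NUM_RCU_LVL_0 = 1
 *   NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 64) = 64
 *   NUM_RCU_LVL_2 = 4096, NUM_RCU_LVL_3 = NUM_RCU_LVL_4 = 0
 *   RCU_SUM       = 1 + 64 + 4096 = 4161
 *   NUM_RCU_NODES = 4161 - 4096 = 65   (one root plus 64 leaf rcu_nodes)
 * The new fourth level is needed only when NR_CPUS exceeds RCU_FANOUT_CUBE.
 */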
@@ -84,14 +95,21 @@ struct rcu_node {
long gpnum; /* Current grace period for this node. */
/* This will either be equal to or one */
/* behind the root rcu_node's gpnum. */
+ long completed; /* Last grace period completed for this node. */
+ /* This will either be equal to or one */
+ /* behind the root rcu_node's gpnum. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
/* In leaf rcu_node, each bit corresponds to */
/* an rcu_data structure, otherwise, each */
/* bit corresponds to a child rcu_node */
/* structure. */
+ unsigned long expmask; /* Groups that have ->blocked_tasks[] */
+ /* elements that need to drain to allow the */
+ /* current expedited grace period to */
+ /* complete (only for TREE_PREEMPT_RCU). */
unsigned long qsmaskinit;
- /* Per-GP initialization for qsmask. */
+ /* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
/* Only one bit will be set in this mask. */
int grplo; /* lowest-numbered CPU or group here. */
@@ -99,7 +117,7 @@ struct rcu_node {
u8 grpnum; /* CPU/group number for next level up. */
u8 level; /* root is at level 0. */
struct rcu_node *parent;
- struct list_head blocked_tasks[2];
+ struct list_head blocked_tasks[4];
/* Tasks blocked in RCU read-side critsect. */
/* Grace period number (->gpnum) x blocked */
/* by tasks on the (x & 0x1) element of the */
@@ -114,6 +132,21 @@ struct rcu_node {
for ((rnp) = &(rsp)->node[0]; \
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+/*
+ * Do a breadth-first scan of the non-leaf rcu_node structures for the
+ * specified rcu_state structure. Note that if there is a singleton
+ * rcu_node tree with but one rcu_node structure, this loop is a no-op.
+ */
+#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ for ((rnp) = &(rsp)->node[0]; \
+ (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
+
+/*
+ * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
+ * structure. Note that if there is a singleton rcu_node tree with but
+ * one rcu_node structure, this loop -will- visit the rcu_node structure.
+ * It is still a leaf node, even if it is also the root node.
+ */
#define rcu_for_each_leaf_node(rsp, rnp) \
for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
@@ -167,6 +200,10 @@ struct rcu_data {
struct rcu_head *nxtlist;
struct rcu_head **nxttail[RCU_NEXT_SIZE];
long qlen; /* # of queued callbacks */
+ long qlen_last_fqs_check;
+ /* qlen at last check for QS forcing */
+ unsigned long n_force_qs_snap;
+ /* did other CPU force QS recently? */
long blimit; /* Upper limit on a processed batch */
#ifdef CONFIG_NO_HZ
@@ -197,13 +234,15 @@ struct rcu_data {
};
/* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT 0 /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
-#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
+#define RCU_GP_IDLE 0 /* No grace period in progress. */
+#define RCU_GP_INIT 1 /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
+#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
+#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_FORCE_QS
+#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
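Read together with the force_quiescent_state() changes above, the intended ->signaled progression under CONFIG_NO_HZ is roughly as follows (informal sketch; the RCU_GP_INIT step is taken by rcu_start_gp(), which is outside the hunks shown here):

/*
 *   RCU_GP_IDLE              no grace period in progress
 *     -> RCU_GP_INIT         grace period being initialized (rcu_start_gp())
 *     -> RCU_SAVE_DYNTICK    force_quiescent_state() snapshots dynticks state
 *     -> RCU_SAVE_COMPLETED  record rsp->completed_fqs under the root lock
 *     -> RCU_FORCE_QS        recheck dynticks, IPI the laggard CPUs
 *     -> RCU_GP_IDLE         rcu_report_qs_rsp() once all QSes are reported
 * Without CONFIG_NO_HZ, RCU_SIGNAL_INIT starts at RCU_SAVE_COMPLETED and
 * the dyntick-snapshot step is skipped.
 */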
@@ -241,7 +280,7 @@ struct rcu_state {
long gpnum; /* Current gp number. */
long completed; /* # of last completed gp. */
- /* End of fields guarded by root rcu_node's lock. */
+ /* End of fields guarded by root rcu_node's lock. */
spinlock_t onofflock; /* exclude on/offline and */
/* starting new GP. Also */
@@ -255,6 +294,8 @@ struct rcu_state {
long orphan_qlen; /* Number of orphaned cbs. */
spinlock_t fqslock; /* Only one task forcing */
/* quiescent states. */
+ long completed_fqs; /* Value of completed @ snap. */
+ /* Protected by fqslock. */
unsigned long jiffies_force_qs; /* Time at which to invoke */
/* force_quiescent_state(). */
unsigned long n_force_qs; /* Number of calls to */
@@ -269,11 +310,15 @@ struct rcu_state {
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#ifdef CONFIG_NO_HZ
- long dynticks_completed; /* Value of completed @ snap. */
-#endif /* #ifdef CONFIG_NO_HZ */
};
+/* Return values for rcu_preempt_offline_tasks(). */
+
+#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
+ /* GP were moved to root. */
+#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
+ /* GP were moved to root. */
+
#ifdef RCU_TREE_NONCORE
/*
@@ -293,23 +338,30 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#else /* #ifdef RCU_TREE_NONCORE */
/* Forward declarations for rcutree_plugin.h */
-static inline void rcu_bootup_announce(void);
+static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
+#ifdef CONFIG_HOTPLUG_CPU
+static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
+ unsigned long flags);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_task_stall(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp,
+ struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
+#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c0cb783aa16a..37fbccdf41d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,16 +24,19 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
+#include <linux/delay.h>
#ifdef CONFIG_TREE_PREEMPT_RCU
struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
+static int rcu_preempted_readers_exp(struct rcu_node *rnp);
+
/*
* Tell them what RCU they are running.
*/
-static inline void rcu_bootup_announce(void)
+static void __init rcu_bootup_announce(void)
{
printk(KERN_INFO
"Experimental preemptable hierarchical RCU implementation.\n");
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
static void rcu_preempt_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
- rdp->passed_quiesc_completed = rdp->completed;
+ rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
}
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
*/
static int rcu_preempted_readers(struct rcu_node *rnp)
{
- return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+ int phase = rnp->gpnum & 0x1;
+
+ return !list_empty(&rnp->blocked_tasks[phase]) ||
+ !list_empty(&rnp->blocked_tasks[phase + 2]);
+}
+
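For reference, the indexing convention for the enlarged ->blocked_tasks[] array that this test relies on, as inferred from sync_rcu_preempt_exp_init() and rcu_preempted_readers_exp() later in this file:

/*
 *   blocked_tasks[0], [1]  tasks blocking the normal grace period whose
 *                          ->gpnum has low-order bit 0 or 1, respectively
 *   blocked_tasks[2], [3]  those same lists after being spliced over to
 *                          the current expedited grace period
 * So with phase = rnp->gpnum & 0x1, list [phase] holds the normal-GP
 * blockers and list [phase + 2] holds the expedited-GP blockers.
 */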
+/*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period. The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
+ __releases(rnp->lock)
+{
+ unsigned long mask;
+ struct rcu_node *rnp_p;
+
+ if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return; /* Still need more quiescent states! */
+ }
+
+ rnp_p = rnp->parent;
+ if (rnp_p == NULL) {
+ /*
+ * Either there is only one rcu_node in the tree,
+ * or tasks were kicked up to root rcu_node due to
+ * CPUs going offline.
+ */
+ rcu_report_qs_rsp(&rcu_preempt_state, flags);
+ return;
+ }
+
+ /* Report up the rest of the hierarchy. */
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ spin_lock(&rnp_p->lock); /* irqs already disabled. */
+ rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or the task having blocked during the
+ * RCU read-side critical section.
+ */
static void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
+ int empty_exp;
unsigned long flags;
- unsigned long mask;
struct rcu_node *rnp;
int special;
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t)
spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
empty = !rcu_preempted_readers(rnp);
+ empty_exp = !rcu_preempted_readers_exp(rnp);
+ smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
list_del_init(&t->rcu_node_entry);
t->rcu_blocked_node = NULL;
/*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
- * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
- * drop rnp->lock and restore irq.
+ * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
*/
- if (!empty && rnp->qsmask == 0 &&
- !rcu_preempted_readers(rnp)) {
- struct rcu_node *rnp_p;
-
- if (rnp->parent == NULL) {
- /* Only one rcu_node in the tree. */
- cpu_quiet_msk_finish(&rcu_preempt_state, flags);
- return;
- }
- /* Report up the rest of the hierarchy. */
- mask = rnp->grpmask;
+ if (empty)
spin_unlock_irqrestore(&rnp->lock, flags);
- rnp_p = rnp->parent;
- spin_lock_irqsave(&rnp_p->lock, flags);
- WARN_ON_ONCE(rnp->qsmask);
- cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
- return;
- }
- spin_unlock(&rnp->lock);
+ else
+ rcu_report_unblock_qs_rnp(rnp, flags);
+
+ /*
+ * If this was the last task on the expedited lists,
+ * then we need to report up the rcu_node hierarchy.
+ */
+ if (!empty_exp && !rcu_preempted_readers_exp(rnp))
+ rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+ } else {
+ local_irq_restore(flags);
}
- local_irq_restore(flags);
}
/*
@@ -303,26 +344,34 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
* rcu_node. The reason for not just moving them to the immediate
* parent is to remove the need for rcu_read_unlock_special() to
* make more than two attempts to acquire the target rcu_node's lock.
+ * Returns a bitmask: RCU_OFL_TASKS_NORM_GP if tasks were blocking the
+ * current normal grace period on the specified rcu_node structure, and
+ * RCU_OFL_TASKS_EXP_GP if tasks were blocking the current expedited
+ * grace period.
*
* The caller must hold rnp->lock with irqs disabled.
*/
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp,
+ struct rcu_data *rdp)
{
int i;
struct list_head *lp;
struct list_head *lp_root;
+ int retval = 0;
struct rcu_node *rnp_root = rcu_get_root(rsp);
struct task_struct *tp;
if (rnp == rnp_root) {
WARN_ONCE(1, "Last CPU thought to be offlined?");
- return; /* Shouldn't happen: at least one CPU online. */
+ return 0; /* Shouldn't happen: at least one CPU online. */
}
WARN_ON_ONCE(rnp != rdp->mynode &&
(!list_empty(&rnp->blocked_tasks[0]) ||
- !list_empty(&rnp->blocked_tasks[1])));
+ !list_empty(&rnp->blocked_tasks[1]) ||
+ !list_empty(&rnp->blocked_tasks[2]) ||
+ !list_empty(&rnp->blocked_tasks[3])));
/*
* Move tasks up to root rcu_node. Rely on the fact that the
@@ -330,7 +379,11 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
* rcu_nodes in terms of gp_num value. This fact allows us to
* move the blocked_tasks[] array directly, element by element.
*/
- for (i = 0; i < 2; i++) {
+ if (rcu_preempted_readers(rnp))
+ retval |= RCU_OFL_TASKS_NORM_GP;
+ if (rcu_preempted_readers_exp(rnp))
+ retval |= RCU_OFL_TASKS_EXP_GP;
+ for (i = 0; i < 4; i++) {
lp = &rnp->blocked_tasks[i];
lp_root = &rnp_root->blocked_tasks[i];
while (!list_empty(lp)) {
@@ -342,6 +395,7 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
spin_unlock(&rnp_root->lock); /* irqs remain disabled */
}
}
+ return retval;
}
/*
@@ -392,6 +446,186 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu);
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+ struct rcu_synchronize rcu;
+
+ if (!rcu_scheduler_active)
+ return;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
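A minimal reader/updater sketch for the preemptible flavor (hypothetical code, shown only to illustrate the exported API; the item type and list are invented for the example):

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int key;
};

static LIST_HEAD(item_list);

static int item_present(int key)
{
	struct item *it;
	int found = 0;

	rcu_read_lock();			/* reader may be preempted inside */
	list_for_each_entry_rcu(it, &item_list, node) {
		if (it->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void item_free(struct item *it)		/* already unlinked via list_del_rcu() */
{
	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(it);
}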
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static long sync_rcu_preempt_exp_count;
+static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(struct rcu_node *rnp)
+{
+ return !list_empty(&rnp->blocked_tasks[2]) ||
+ !list_empty(&rnp->blocked_tasks[3]);
+}
+
+/*
+ * Return non-zero if there is no RCU expedited grace period in progress
+ * for the specified rcu_node structure, in other words, if all CPUs and
+ * tasks covered by the specified rcu_node structure have done their bit
+ * for the current expedited grace period. Works only for preemptible
+ * RCU -- other RCU implementations use other means.
+ *
+ * Caller must hold sync_rcu_preempt_exp_mutex.
+ */
+static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+{
+ return !rcu_preempted_readers_exp(rnp) &&
+ ACCESS_ONCE(rnp->expmask) == 0;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period. This event is reported either to the rcu_node structure on
+ * which the task was queued or to one of that rcu_node structure's ancestors,
+ * recursively up the tree. (Calm down, calm down, we do the recursion
+ * iteratively!)
+ *
+ * Caller must hold sync_rcu_preempt_exp_mutex.
+ */
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+ unsigned long flags;
+ unsigned long mask;
+
+ spin_lock_irqsave(&rnp->lock, flags);
+ for (;;) {
+ if (!sync_rcu_preempt_exp_done(rnp))
+ break;
+ if (rnp->parent == NULL) {
+ wake_up(&sync_rcu_preempt_exp_wq);
+ break;
+ }
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
+ rnp = rnp->parent;
+ spin_lock(&rnp->lock); /* irqs already disabled */
+ rnp->expmask &= ~mask;
+ }
+ spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Snapshot the tasks blocking the newly started preemptible-RCU expedited
+ * grace period for the specified rcu_node structure. If there are no such
+ * tasks, report it up the rcu_node hierarchy.
+ *
+ * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
+ */
+static void
+sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+ int must_wait;
+
+ spin_lock(&rnp->lock); /* irqs already disabled */
+ list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
+ list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
+ must_wait = rcu_preempted_readers_exp(rnp);
+ spin_unlock(&rnp->lock); /* irqs remain disabled */
+ if (!must_wait)
+ rcu_report_exp_rnp(rsp, rnp);
+}
+
+/*
+ * Wait for an rcu-preempt grace period, but expedite it. The basic idea
+ * is to invoke synchronize_sched_expedited() to push all the tasks to
+ * the ->blocked_tasks[] lists, move all entries from the first set of
+ * ->blocked_tasks[] lists to the second set, and finally wait for this
+ * second set to drain.
+ */
+void synchronize_rcu_expedited(void)
+{
+ unsigned long flags;
+ struct rcu_node *rnp;
+ struct rcu_state *rsp = &rcu_preempt_state;
+ long snap;
+ int trycount = 0;
+
+ smp_mb(); /* Caller's modifications seen first by other CPUs. */
+ snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
+ smp_mb(); /* Above access cannot bleed into critical section. */
+
+ /*
+ * Acquire lock, falling back to synchronize_rcu() if too many
+ * lock-acquisition failures. Of course, if someone does the
+ * expedited grace period for us, just leave.
+ */
+ while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
+ if (trycount++ < 10)
+ udelay(trycount * num_online_cpus());
+ else {
+ synchronize_rcu();
+ return;
+ }
+ if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+ goto mb_ret; /* Others did our work for us. */
+ }
+ if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+ goto unlock_mb_ret; /* Others did our work for us. */
+
+ /* force all RCU readers onto blocked_tasks[]. */
+ synchronize_sched_expedited();
+
+ spin_lock_irqsave(&rsp->onofflock, flags);
+
+ /* Initialize ->expmask for all non-leaf rcu_node structures. */
+ rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->expmask = rnp->qsmaskinit;
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ }
+
+ /* Snapshot current state of ->blocked_tasks[] lists. */
+ rcu_for_each_leaf_node(rsp, rnp)
+ sync_rcu_preempt_exp_init(rsp, rnp);
+ if (NUM_RCU_NODES > 1)
+ sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
+
+ spin_unlock_irqrestore(&rsp->onofflock, flags);
+
+ /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
+ rnp = rcu_get_root(rsp);
+ wait_event(sync_rcu_preempt_exp_wq,
+ sync_rcu_preempt_exp_done(rnp));
+
+ /* Clean up and exit. */
+ smp_mb(); /* ensure expedited GP seen before counter increment. */
+ ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
+unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+mb_ret:
+ smp_mb(); /* ensure subsequent action seen after grace period. */
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
/*
* Check to see if there is any immediate preemptable-RCU-related work
* to be done.
@@ -464,7 +698,7 @@ void exit_rcu(void)
/*
* Tell them what RCU they are running.
*/
-static inline void rcu_bootup_announce(void)
+static void __init rcu_bootup_announce(void)
{
printk(KERN_INFO "Hierarchical RCU implementation.\n");
}
@@ -495,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Because preemptible RCU does not exist, no quieting of tasks. */
+static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
+{
+ spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
/*
@@ -521,12 +765,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
/*
* Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
*/
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp,
+ struct rcu_data *rdp)
{
+ return 0;
}
/*
@@ -565,6 +812,30 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
EXPORT_SYMBOL_GPL(call_rcu);
/*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * Because preemptable RCU does not exist here, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+ synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Because preemptable RCU does not exist, there is never any need to
+ * report on tasks preempted in RCU read-side critical sections during
+ * expedited RCU grace periods.
+ */
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+ return;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
* Because preemptable RCU does not exist, it never has any work to do.
*/
static int rcu_preempt_pending(int cpu)
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4b31c779e62e..9d2c88423b31 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = {
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
+ long gpnum;
int level = 0;
+ int phase;
struct rcu_node *rnp;
+ gpnum = rsp->gpnum;
seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
"nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
- rsp->completed, rsp->gpnum, rsp->signaled,
+ rsp->completed, gpnum, rsp->signaled,
(long)(rsp->jiffies_force_qs - jiffies),
(int)(jiffies & 0xffff),
rsp->n_force_qs, rsp->n_force_qs_ngp,
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
level = rnp->level;
}
- seq_printf(m, "%lx/%lx %d:%d ^%d ",
+ phase = gpnum & 0x1;
+ seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
rnp->qsmask, rnp->qsmaskinit,
+ "T."[list_empty(&rnp->blocked_tasks[phase])],
+ "E."[list_empty(&rnp->blocked_tasks[phase + 2])],
+ "T."[list_empty(&rnp->blocked_tasks[!phase])],
+ "E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
rnp->grplo, rnp->grphi, rnp->grpnum);
}
seq_puts(m, "\n");
diff --git a/kernel/sched.c b/kernel/sched.c
index e88689522e66..6ae2739b8f19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
*/
static DEFINE_SPINLOCK(task_group_lock);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
}
#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else /* !CONFIG_USER_SCHED */
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
-struct update_shares_data {
- unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
static void update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares,
unsigned long sd_rq_weight,
- struct update_shares_data *usd)
+ unsigned long *usd_rq_weight)
{
unsigned long shares, rq_weight;
int boost = 0;
- rq_weight = usd->rq_weight[cpu];
+ rq_weight = usd_rq_weight[cpu];
if (!rq_weight) {
boost = 1;
rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
static int tg_shares_up(struct task_group *tg, void *data)
{
unsigned long weight, rq_weight = 0, shares = 0;
- struct update_shares_data *usd;
+ unsigned long *usd_rq_weight;
struct sched_domain *sd = data;
unsigned long flags;
int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
return 0;
local_irq_save(flags);
- usd = &__get_cpu_var(update_shares_data);
+ usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
for_each_cpu(i, sched_domain_span(sd)) {
weight = tg->cfs_rq[i]->load.weight;
- usd->rq_weight[i] = weight;
+ usd_rq_weight[i] = weight;
/*
* If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
shares = tg->shares;
for_each_cpu(i, sched_domain_span(sd))
- update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+ update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
local_irq_restore(flags);
@@ -1996,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running);
}
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @p to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ /* Must have done schedule() in kthread() before we set_task_cpu */
+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ set_task_cpu(p, cpu);
+ p->cpus_allowed = cpumask_of_cpu(cpu);
+ p->rt.nr_cpus_allowed = 1;
+ p->flags |= PF_THREAD_BOUND;
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
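A hypothetical caller of the relocated kthread_bind() (function and names invented for illustration):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static int start_pinned_worker(int (*fn)(void *), void *arg, unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(fn, arg, "pinned/%u", cpu);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	kthread_bind(tsk, cpu);		/* thread has not yet run, so binding is safe */
	wake_up_process(tsk);
	return 0;
}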
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
@@ -2008,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
/*
* Buddy candidates are cache hot:
*/
- if (sched_feat(CACHE_HOT_BUDDY) &&
+ if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
@@ -5452,7 +5481,7 @@ need_resched_nonpreemptible:
}
EXPORT_SYMBOL(schedule);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* Look out! "owner" is an entirely speculative pointer
* access and not reliable.
@@ -9407,6 +9436,10 @@ void __init sched_init(void)
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_GROUP_SCHED */
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+ update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+ __alignof__(unsigned long));
+#endif
for_each_possible_cpu(i) {
struct rq *rq;
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
- alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ
- alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
- alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
perf_event_init();
@@ -10868,6 +10901,7 @@ void synchronize_sched_expedited(void)
spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+ synchronize_sched_expedited_count++;
mutex_unlock(&rcu_sched_expedited_mutex);
put_online_cpus();
if (need_full_sync)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
* re-elected due to buddy favours.
*/
clear_buddies(cfs_rq, curr);
+ return;
+ }
+
+ /*
+ * Ensure that a task that missed wakeup preemption by a
+ * narrow margin doesn't have to wait for a full slice.
+ * This also mitigates buddy induced latencies under load.
+ */
+ if (!sched_feat(WAKEUP_PREEMPT))
+ return;
+
+ if (delta_exec < sysctl_sched_min_granularity)
+ return;
+
+ if (cfs_rq->nr_running > 1) {
+ struct sched_entity *se = __pick_next_entity(cfs_rq);
+ s64 delta = curr->vruntime - se->vruntime;
+
+ if (delta > ideal_runtime)
+ resched_task(rq_of(cfs_rq)->curr);
}
}
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *left = se;
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
- return cfs_rq->next;
+ if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+ se = cfs_rq->next;
- if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
- return cfs_rq->last;
+ /*
+ * Prefer last buddy, try to return the CPU to a preempted task.
+ */
+ if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+ se = cfs_rq->last;
+
+ clear_buddies(cfs_rq, se);
return se;
}
@@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int sync = wake_flags & WF_SYNC;
+ int scale = cfs_rq->nr_running >= sched_nr_latency;
update_curr(cfs_rq);
@@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(se == pse))
return;
- /*
- * Only set the backward buddy when the current task is still on the
- * rq. This can happen when a wakeup gets interleaved with schedule on
- * the ->pre_schedule() or idle_balance() point, either of which can
- * drop the rq lock.
- *
- * Also, during early boot the idle thread is in the fair class, for
- * obvious reasons its a bad idea to schedule back to the idle thread.
- */
- if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
- set_last_buddy(se);
- if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+ if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
set_next_buddy(pse);
/*
@@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
BUG_ON(!pse);
- if (wakeup_preempt_entity(se, pse) == 1)
+ if (wakeup_preempt_entity(se, pse) == 1) {
resched_task(curr);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+ * with schedule on the ->pre_schedule() or idle_balance()
+ * point, either of which can drop the rq lock.
+ *
+ * Also, during early boot the idle thread is in the fair class,
+ * for obvious reasons it's a bad idea to schedule back to it.
+ */
+ if (unlikely(!se->on_rq || curr == rq->idle))
+ return;
+ if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+ set_last_buddy(se);
+ }
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
do {
se = pick_next_entity(cfs_rq);
- /*
- * If se was a buddy, clear it so that it will have to earn
- * the favour again.
- *
- * If se was not a buddy, clear the buddies because neither
- * was elegible to run, let them earn it again.
- *
- * IOW. unconditionally clear buddies.
- */
- __clear_buddies(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
diff --git a/kernel/signal.c b/kernel/signal.c
index 6705320784fd..fe08008133da 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
+#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
@@ -41,6 +42,8 @@
static struct kmem_cache *sigqueue_cachep;
+int print_fatal_signals __read_mostly;
+
static void __user *sig_handler(struct task_struct *t, int sig)
{
return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +162,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
-
+
s = pending->signal.sig;
m = mask->sig;
switch (_NSIG_WORDS) {
@@ -184,17 +187,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
sig = ffz(~x) + 1;
break;
}
-
+
return sig;
}
+static inline void print_dropped_signal(int sig)
+{
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+ if (!print_fatal_signals)
+ return;
+
+ if (!__ratelimit(&ratelimit_state))
+ return;
+
+ printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+ current->comm, current->pid, sig);
+}
+
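With DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10), __ratelimit() lets at most 10 of these messages through in any 5-second interval, so a task that repeatedly overruns RLIMIT_SIGPENDING generates only a bounded amount of console output.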
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
- int override_rlimit)
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
struct sigqueue *q = NULL;
struct user_struct *user;
@@ -207,10 +224,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
*/
user = get_uid(__task_cred(t)->user);
atomic_inc(&user->sigpending);
+
if (override_rlimit ||
atomic_read(&user->sigpending) <=
- t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+ t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+
if (unlikely(q == NULL)) {
atomic_dec(&user->sigpending);
free_uid(user);
@@ -869,7 +891,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
else
override_rlimit = 0;
- q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+ q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
override_rlimit);
if (q) {
list_add_tail(&q->list, &pending->list);
@@ -925,8 +947,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
return __send_signal(sig, info, t, group, from_ancestor_ns);
}
-int print_fatal_signals;
-
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -1293,19 +1313,19 @@ EXPORT_SYMBOL(kill_pid);
* These functions support sending signals using preallocated sigqueue
* structures. This is needed "because realtime applications cannot
* afford to lose notifications of asynchronous events, like timer
- * expirations or I/O completions". In the case of Posix Timers
+ * expirations or I/O completions". In the case of Posix Timers
* we allocate the sigqueue structure from the timer_create. If this
* allocation fails we are able to report the failure to the application
* with an EAGAIN error.
*/
-
struct sigqueue *sigqueue_alloc(void)
{
- struct sigqueue *q;
+ struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
- if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
+ if (q)
q->flags |= SIGQUEUE_PREALLOC;
- return(q);
+
+ return q;
}
void sigqueue_free(struct sigqueue *q)
diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c
new file mode 100644
index 000000000000..e45c43645298
--- /dev/null
+++ b/kernel/slow-work-debugfs.c
@@ -0,0 +1,227 @@
+/* Slow work debugging
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slow-work.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+#include "slow-work.h"
+
+#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
+#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
+#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
+
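The seq_file position packing, spelled out for a 64-bit build (BITS_PER_LONG = 64, so ITERATOR_SHIFT = 60):

/*
 *   pos = 1, 2                       the two header lines
 *   pos = 3 .. 3 + THREAD_LIMIT - 1  currently executing item of thread (pos - 3)
 *   pos = (0x1UL << 60) + n          n'th item queued on slow_work_queue
 *   pos = (0x2UL << 60) + n          n'th item queued on vslow_work_queue
 * The top four bits (ITERATOR_SELECTOR) choose which section is being walked;
 * the remaining bits (ITERATOR_COUNTER) count entries within that section.
 */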
+void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
+{
+ seq_puts(m, "Slow-work: New thread");
+}
+
+/*
+ * Render the time mark field on a work item into a 5-char time with units plus
+ * a space
+ */
+static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
+{
+ struct timespec now, diff;
+
+ now = CURRENT_TIME;
+ diff = timespec_sub(now, work->mark);
+
+ if (diff.tv_sec < 0)
+ seq_puts(m, " -ve ");
+ else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
+ seq_printf(m, "%3luns ", diff.tv_nsec);
+ else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
+ seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
+ else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
+ seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
+ else if (diff.tv_sec <= 1)
+ seq_puts(m, " 1s ");
+ else if (diff.tv_sec < 60)
+ seq_printf(m, "%4lus ", diff.tv_sec);
+ else if (diff.tv_sec < 60 * 60)
+ seq_printf(m, "%4lum ", diff.tv_sec / 60);
+ else if (diff.tv_sec < 60 * 60 * 24)
+ seq_printf(m, "%4luh ", diff.tv_sec / 3600);
+ else
+ seq_puts(m, "exces ");
+}
+
+/*
+ * Describe a slow work item for debugfs
+ */
+static int slow_work_runqueue_show(struct seq_file *m, void *v)
+{
+ struct slow_work *work;
+ struct list_head *p = v;
+ unsigned long id;
+
+ switch ((unsigned long) v) {
+ case 1:
+ seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
+ return 0;
+ case 2:
+ seq_puts(m, "=== ===== ================ == ===== ==========\n");
+ return 0;
+
+ case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
+ id = (unsigned long) v - 3;
+
+ read_lock(&slow_work_execs_lock);
+ work = slow_work_execs[id];
+ if (work) {
+ smp_read_barrier_depends();
+
+ seq_printf(m, "%3lu %5d %16p %2lx ",
+ id, slow_work_pids[id], work, work->flags);
+ slow_work_print_mark(m, work);
+
+ if (work->ops->desc)
+ work->ops->desc(work, m);
+ seq_putc(m, '\n');
+ }
+ read_unlock(&slow_work_execs_lock);
+ return 0;
+
+ default:
+ work = list_entry(p, struct slow_work, link);
+ seq_printf(m, "%3s - %16p %2lx ",
+ work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
+ work, work->flags);
+ slow_work_print_mark(m, work);
+
+ if (work->ops->desc)
+ work->ops->desc(work, m);
+ seq_putc(m, '\n');
+ return 0;
+ }
+}
+
+/*
+ * map the iterator to a work item
+ */
+static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
+{
+ struct list_head *p;
+ unsigned long count, id;
+
+ switch (*_pos >> ITERATOR_SHIFT) {
+ case 0x0:
+ if (*_pos == 0)
+ *_pos = 1;
+ if (*_pos < 3)
+ return (void *)(unsigned long) *_pos;
+ if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
+ for (id = *_pos - 3;
+ id < SLOW_WORK_THREAD_LIMIT;
+ id++, (*_pos)++)
+ if (slow_work_execs[id])
+ return (void *)(unsigned long) *_pos;
+ *_pos = 0x1UL << ITERATOR_SHIFT;
+
+ case 0x1:
+ count = *_pos & ITERATOR_COUNTER;
+ list_for_each(p, &slow_work_queue) {
+ if (count == 0)
+ return p;
+ count--;
+ }
+ *_pos = 0x2UL << ITERATOR_SHIFT;
+
+ case 0x2:
+ count = *_pos & ITERATOR_COUNTER;
+ list_for_each(p, &vslow_work_queue) {
+ if (count == 0)
+ return p;
+ count--;
+ }
+ *_pos = 0x3UL << ITERATOR_SHIFT;
+
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
+{
+ spin_lock_irq(&slow_work_queue_lock);
+ return slow_work_runqueue_index(m, _pos);
+}
+
+/*
+ * move to the next line
+ */
+static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ struct list_head *p = v;
+ unsigned long selector = *_pos >> ITERATOR_SHIFT;
+
+ (*_pos)++;
+ switch (selector) {
+ case 0x0:
+ return slow_work_runqueue_index(m, _pos);
+
+ case 0x1:
+ if (*_pos >> ITERATOR_SHIFT == 0x1) {
+ p = p->next;
+ if (p != &slow_work_queue)
+ return p;
+ }
+ *_pos = 0x2UL << ITERATOR_SHIFT;
+ p = &vslow_work_queue;
+
+ case 0x2:
+ if (*_pos >> ITERATOR_SHIFT == 0x2) {
+ p = p->next;
+ if (p != &vslow_work_queue)
+ return p;
+ }
+ *_pos = 0x3UL << ITERATOR_SHIFT;
+
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * clean up after reading
+ */
+static void slow_work_runqueue_stop(struct seq_file *m, void *v)
+{
+ spin_unlock_irq(&slow_work_queue_lock);
+}
+
+static const struct seq_operations slow_work_runqueue_ops = {
+ .start = slow_work_runqueue_start,
+ .stop = slow_work_runqueue_stop,
+ .next = slow_work_runqueue_next,
+ .show = slow_work_runqueue_show,
+};
+
+/*
+ * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
+ */
+static int slow_work_runqueue_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &slow_work_runqueue_ops);
+}
+
+const struct file_operations slow_work_runqueue_fops = {
+ .owner = THIS_MODULE,
+ .open = slow_work_runqueue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
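Both display paths above finish by calling the item's ->desc() operation to append a one-line description. As a rough sketch of what a user-supplied handler could look like (struct my_slow_work and its cookie field are illustrative, not part of this patch):

struct my_slow_work {
	struct slow_work	work;
	unsigned long		cookie;
};

static void my_slow_work_desc(struct slow_work *work, struct seq_file *m)
{
	struct my_slow_work *mw = container_of(work, struct my_slow_work, work);

	/* keep it to one short line; the caller appends the newline */
	seq_printf(m, "my-work cookie=%lx", mw->cookie);
}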
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..00889bd3c590 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
-
-#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
- * things to do */
-#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
- * OOM */
+#include <linux/debugfs.h>
+#include "slow-work.h"
static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
-static int slow_work_max_max_threads = 255;
+static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;
@@ -98,6 +95,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */
/*
+ * slow work ID allocation (use slow_work_queue_lock)
+ */
+static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+
+/*
+ * Unregistration tracking to prevent put_ref() from disappearing during module
+ * unload
+ */
+#ifdef CONFIG_MODULES
+static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
+static struct module *slow_work_unreg_module;
+static struct slow_work *slow_work_unreg_work_item;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
+static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+
+static void slow_work_set_thread_processing(int id, struct slow_work *work)
+{
+ if (work)
+ slow_work_thread_processing[id] = work->owner;
+}
+static void slow_work_done_thread_processing(int id, struct slow_work *work)
+{
+ struct module *module = slow_work_thread_processing[id];
+
+ slow_work_thread_processing[id] = NULL;
+ smp_mb();
+ if (slow_work_unreg_work_item == work ||
+ slow_work_unreg_module == module)
+ wake_up_all(&slow_work_unreg_wq);
+}
+static void slow_work_clear_thread_processing(int id)
+{
+ slow_work_thread_processing[id] = NULL;
+}
+#else
+static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_clear_thread_processing(int id) {}
+#endif
+
+/*
+ * Data for tracking currently executing items for indication through /proc
+ */
+#ifdef CONFIG_SLOW_WORK_DEBUG
+struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
+pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
+DEFINE_RWLOCK(slow_work_execs_lock);
+#endif
+
+/*
* The queues of work items and the lock governing access to them. These are
* shared between all the CPUs. It doesn't make sense to have per-CPU queues
* as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +152,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
* There are two queues of work items: one for slow work items, and one for
* very slow work items.
*/
-static LIST_HEAD(slow_work_queue);
-static LIST_HEAD(vslow_work_queue);
-static DEFINE_SPINLOCK(slow_work_queue_lock);
+LIST_HEAD(slow_work_queue);
+LIST_HEAD(vslow_work_queue);
+DEFINE_SPINLOCK(slow_work_queue_lock);
+
+/*
+ * The following are two wait queues that get pinged when a work item is placed
+ * on an empty queue. These allow a work item that is hogging a thread by
+ * sleeping in a way that could be deferred to yield its thread and requeue
+ * itself.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
+static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
/*
* The thread controls. A variable used to signal to the threads that they
@@ -126,6 +182,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+ if (work->ops->get_ref)
+ return work->ops->get_ref(work);
+
+ return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+ if (work->ops->put_ref)
+ work->ops->put_ref(work);
+}
+
/*
* Calculate the maximum number of active threads in the pool that are
* permitted to process very slow work items.
@@ -149,7 +219,7 @@ static unsigned slow_work_calc_vsmax(void)
* Attempt to execute stuff queued on a slow thread. Return true if we managed
* it, false if there was nothing to do.
*/
-static bool slow_work_execute(void)
+static noinline bool slow_work_execute(int id)
{
struct slow_work *work = NULL;
unsigned vsmax;
@@ -186,6 +256,13 @@ static bool slow_work_execute(void)
} else {
very_slow = false; /* avoid the compiler warning */
}
+
+ slow_work_set_thread_processing(id, work);
+ if (work) {
+ slow_work_mark_time(work);
+ slow_work_begin_exec(id, work);
+ }
+
spin_unlock_irq(&slow_work_queue_lock);
if (!work)
@@ -194,12 +271,19 @@ static bool slow_work_execute(void)
if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
BUG();
- work->ops->execute(work);
+ /* don't execute if the work is in the process of being cancelled */
+ if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
+ work->ops->execute(work);
if (very_slow)
atomic_dec(&vslow_work_executing_count);
clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
+ /* wake up anyone waiting for this work to be complete */
+ wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
+
+ slow_work_end_exec(id, work);
+
/* if someone tried to enqueue the item whilst we were executing it,
* then it'll be left unenqueued to avoid multiple threads trying to
* execute it simultaneously
@@ -219,7 +303,10 @@ static bool slow_work_execute(void)
spin_unlock_irq(&slow_work_queue_lock);
}
- work->ops->put_ref(work);
+ /* sort out the race between module unloading and put_ref() */
+ slow_work_put_ref(work);
+ slow_work_done_thread_processing(id, work);
+
return true;
auto_requeue:
@@ -227,15 +314,61 @@ auto_requeue:
* - we transfer our ref on the item back to the appropriate queue
* - don't wake another thread up as we're awake already
*/
+ slow_work_mark_time(work);
if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
list_add_tail(&work->link, &vslow_work_queue);
else
list_add_tail(&work->link, &slow_work_queue);
spin_unlock_irq(&slow_work_queue_lock);
+ slow_work_clear_thread_processing(id);
return true;
}
/**
+ * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
+ * @work: The work item under execution that wants to sleep
+ * @_timeout: Scheduler sleep timeout
+ *
+ * Allow a requeueable work item to sleep on a slow-work processor thread until
+ * that thread is needed to do some other work or the sleep is interrupted by
+ * some other event.
+ *
+ * The caller must set up a wake up event before calling this and must have set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
+ * condition before calling this function as no test is made here.
+ *
+ * False is returned if there is nothing on the queue; true is returned if the
+ * work item should be requeued
+ */
+bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+ signed long *_timeout)
+{
+ wait_queue_head_t *wfo_wq;
+ struct list_head *queue;
+
+ DEFINE_WAIT(wait);
+
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+ wfo_wq = &vslow_work_queue_waits_for_occupation;
+ queue = &vslow_work_queue;
+ } else {
+ wfo_wq = &slow_work_queue_waits_for_occupation;
+ queue = &slow_work_queue;
+ }
+
+ if (!list_empty(queue))
+ return true;
+
+ add_wait_queue_exclusive(wfo_wq, &wait);
+ if (list_empty(queue))
+ *_timeout = schedule_timeout(*_timeout);
+ finish_wait(wfo_wq, &wait);
+
+ return !list_empty(queue);
+}
+EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
+
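As a rough sketch of the calling protocol documented above for slow_work_sleep_till_thread_needed() (wake-up source armed, sleep state set, and the caller's own condition tested before each call), an execute() routine might look like the following; my_object, its waitq and flags, my_io_done() and MY_REQUEUE are illustrative names, not part of this patch:

static void my_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);

	while (!my_io_done(obj)) {
		/* arm the wake-up source and set the sleep state first */
		prepare_to_wait(&obj->waitq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_io_done(obj))
			break;
		/* yield the thread if other work is queued or we time out */
		if (slow_work_sleep_till_thread_needed(work, &timeout) ||
		    timeout <= 0) {
			set_bit(MY_REQUEUE, &obj->flags);
			break;
		}
	}
	finish_wait(&obj->waitq, &wait);
}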
+/**
* slow_work_enqueue - Schedule a slow work item for processing
* @work: The work item to queue
*
@@ -260,16 +393,22 @@ auto_requeue:
* allowed to pick items to execute. This ensures that very slow items won't
* overly block ones that are just ordinarily slow.
*
- * Returns 0 if successful, -EAGAIN if not.
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if an attempt is made
+ * to queue cancelled work)
*/
int slow_work_enqueue(struct slow_work *work)
{
+ wait_queue_head_t *wfo_wq;
+ struct list_head *queue;
unsigned long flags;
+ int ret;
+
+ if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+ return -ECANCELED;
BUG_ON(slow_work_user_count <= 0);
BUG_ON(!work);
BUG_ON(!work->ops);
- BUG_ON(!work->ops->get_ref);
/* when honouring an enqueue request, we only promise that we will run
* the work function in the future; we do not promise to run it once
@@ -280,8 +419,19 @@ int slow_work_enqueue(struct slow_work *work)
* maintaining our promise
*/
if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+ wfo_wq = &vslow_work_queue_waits_for_occupation;
+ queue = &vslow_work_queue;
+ } else {
+ wfo_wq = &slow_work_queue_waits_for_occupation;
+ queue = &slow_work_queue;
+ }
+
spin_lock_irqsave(&slow_work_queue_lock, flags);
+ if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
+ goto cancelled;
+
/* we promise that we will not attempt to execute the work
* function in more than one thread simultaneously
*
@@ -299,25 +449,221 @@ int slow_work_enqueue(struct slow_work *work)
if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
} else {
- if (work->ops->get_ref(work) < 0)
- goto cant_get_ref;
- if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
- list_add_tail(&work->link, &vslow_work_queue);
- else
- list_add_tail(&work->link, &slow_work_queue);
+ ret = slow_work_get_ref(work);
+ if (ret < 0)
+ goto failed;
+ slow_work_mark_time(work);
+ list_add_tail(&work->link, queue);
wake_up(&slow_work_thread_wq);
+
+ /* if someone who could be requeued is sleeping on a
+ * thread, then ask them to yield their thread */
+ if (work->link.prev == queue)
+ wake_up(wfo_wq);
}
spin_unlock_irqrestore(&slow_work_queue_lock, flags);
}
return 0;
-cant_get_ref:
+cancelled:
+ ret = -ECANCELED;
+failed:
spin_unlock_irqrestore(&slow_work_queue_lock, flags);
- return -EAGAIN;
+ return ret;
}
EXPORT_SYMBOL(slow_work_enqueue);
+static int slow_work_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+/**
+ * slow_work_cancel - Cancel a slow work item
+ * @work: The work item to cancel
+ *
+ * This function will cancel a previously enqueued work item. If we cannot
+ * cancel the work item, it is guaranteed to have run when this function
+ * returns.
+ */
+void slow_work_cancel(struct slow_work *work)
+{
+ bool wait = true, put = false;
+
+ set_bit(SLOW_WORK_CANCELLING, &work->flags);
+ smp_mb();
+
+ /* if the work item is a delayed work item with an active timer, we
+ * need to wait for the timer to finish _before_ getting the spinlock,
+ * lest we deadlock against the timer routine
+ *
+ * the timer routine will leave DELAYED set if it notices the
+ * CANCELLING flag in time
+ */
+ if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+ struct delayed_slow_work *dwork =
+ container_of(work, struct delayed_slow_work, work);
+ del_timer_sync(&dwork->timer);
+ }
+
+ spin_lock_irq(&slow_work_queue_lock);
+
+ if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+ /* the timer routine aborted or never happened, so we are left
+ * holding the timer's reference on the item and should just
+ * drop the pending flag and wait for any ongoing execution to
+ * finish */
+ struct delayed_slow_work *dwork =
+ container_of(work, struct delayed_slow_work, work);
+
+ BUG_ON(timer_pending(&dwork->timer));
+ BUG_ON(!list_empty(&work->link));
+
+ clear_bit(SLOW_WORK_DELAYED, &work->flags);
+ put = true;
+ clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+ } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+ !list_empty(&work->link)) {
+ /* the link in the pending queue holds a reference on the item
+ * that we will need to release */
+ list_del_init(&work->link);
+ wait = false;
+ put = true;
+ clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+ } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
+ /* the executor is holding our only reference on the item, so
+ * we merely need to wait for it to finish executing */
+ clear_bit(SLOW_WORK_PENDING, &work->flags);
+ }
+
+ spin_unlock_irq(&slow_work_queue_lock);
+
+ /* the EXECUTING flag is set by the executor whilst the spinlock is set
+ * and before the item is dequeued - so assuming the above doesn't
+ * actually dequeue it, simply waiting for the EXECUTING flag to be
+ * released here should be sufficient */
+ if (wait)
+ wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
+ TASK_UNINTERRUPTIBLE);
+
+ clear_bit(SLOW_WORK_CANCELLING, &work->flags);
+ if (put)
+ slow_work_put_ref(work);
+}
+EXPORT_SYMBOL(slow_work_cancel);
+
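A minimal sketch of how a caller might rely on the cancellation guarantee above when tearing down an object that embeds a slow_work item (my_object and my_free() are illustrative):

static void my_destroy(struct my_object *obj)
{
	/* after this returns the item is neither queued nor executing */
	slow_work_cancel(&obj->work);
	my_free(obj);
}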
+/*
+ * Handle expiry of the delay timer, indicating that a delayed slow work item
+ * should now be queued if not cancelled
+ */
+static void delayed_slow_work_timer(unsigned long data)
+{
+ wait_queue_head_t *wfo_wq;
+ struct list_head *queue;
+ struct slow_work *work = (struct slow_work *) data;
+ unsigned long flags;
+ bool queued = false, put = false, first = false;
+
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+ wfo_wq = &vslow_work_queue_waits_for_occupation;
+ queue = &vslow_work_queue;
+ } else {
+ wfo_wq = &slow_work_queue_waits_for_occupation;
+ queue = &slow_work_queue;
+ }
+
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+ if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
+ clear_bit(SLOW_WORK_DELAYED, &work->flags);
+
+ if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+ /* we discard the reference the timer was holding in
+ * favour of the one the executor holds */
+ set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+ put = true;
+ } else {
+ slow_work_mark_time(work);
+ list_add_tail(&work->link, queue);
+ queued = true;
+ if (work->link.prev == queue)
+ first = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ if (put)
+ slow_work_put_ref(work);
+ if (first)
+ wake_up(wfo_wq);
+ if (queued)
+ wake_up(&slow_work_thread_wq);
+}
+
+/**
+ * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
+ * @dwork: The delayed work item to queue
+ * @delay: When to start executing the work, in jiffies from now
+ *
+ * This is similar to slow_work_enqueue(), but it adds a delay before the work
+ * is actually queued for processing.
+ *
+ * The item can have delayed processing requested on it whilst it is being
+ * executed. The delay will begin immediately, and if it expires before the
+ * item finishes executing, the item will be placed back on the queue when it
+ * has finished executing.
+ */
+int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+ unsigned long delay)
+{
+ struct slow_work *work = &dwork->work;
+ unsigned long flags;
+ int ret;
+
+ if (delay == 0)
+ return slow_work_enqueue(&dwork->work);
+
+ BUG_ON(slow_work_user_count <= 0);
+ BUG_ON(!work);
+ BUG_ON(!work->ops);
+
+ if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+ return -ECANCELED;
+
+ if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+ if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+ goto cancelled;
+
+ /* the timer holds a reference whilst it is pending */
+ ret = work->ops->get_ref(work);
+ if (ret < 0)
+ goto cant_get_ref;
+
+ if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
+ BUG();
+ dwork->timer.expires = jiffies + delay;
+ dwork->timer.data = (unsigned long) work;
+ dwork->timer.function = delayed_slow_work_timer;
+ add_timer(&dwork->timer);
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ }
+
+ return 0;
+
+cancelled:
+ ret = -ECANCELED;
+cant_get_ref:
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(delayed_slow_work_enqueue);
+
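A short sketch of arming a delayed item with the new call above; my_dwork and my_ops are illustrative, and delayed_slow_work_init() is assumed to be the matching initialiser in <linux/slow_work.h>:

static struct delayed_slow_work my_dwork;

static int my_schedule_rescan(void)
{
	delayed_slow_work_init(&my_dwork, &my_ops);
	return delayed_slow_work_enqueue(&my_dwork, 10 * HZ);	/* ~10s from now */
}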
/*
* Schedule a cull of the thread pool at some time in the near future
*/
@@ -368,13 +714,23 @@ static inline bool slow_work_available(int vsmax)
*/
static int slow_work_thread(void *_data)
{
- int vsmax;
+ int vsmax, id;
DEFINE_WAIT(wait);
set_freezable();
set_user_nice(current, -5);
+ /* allocate ourselves an ID */
+ spin_lock_irq(&slow_work_queue_lock);
+ id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+ BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
+ __set_bit(id, slow_work_ids);
+ slow_work_set_thread_pid(id, current->pid);
+ spin_unlock_irq(&slow_work_queue_lock);
+
+ sprintf(current->comm, "kslowd%03u", id);
+
for (;;) {
vsmax = vslow_work_proportion;
vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +751,7 @@ static int slow_work_thread(void *_data)
vsmax *= atomic_read(&slow_work_thread_count);
vsmax /= 100;
- if (slow_work_available(vsmax) && slow_work_execute()) {
+ if (slow_work_available(vsmax) && slow_work_execute(id)) {
cond_resched();
if (list_empty(&slow_work_queue) &&
list_empty(&vslow_work_queue) &&
@@ -412,6 +768,11 @@ static int slow_work_thread(void *_data)
break;
}
+ spin_lock_irq(&slow_work_queue_lock);
+ slow_work_set_thread_pid(id, 0);
+ __clear_bit(id, slow_work_ids);
+ spin_unlock_irq(&slow_work_queue_lock);
+
if (atomic_dec_and_test(&slow_work_thread_count))
complete_and_exit(&slow_work_last_thread_exited, 0);
return 0;
@@ -427,21 +788,6 @@ static void slow_work_cull_timeout(unsigned long data)
}
/*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
- return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
-/*
* Start a new slow work thread
*/
static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +821,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
}
static const struct slow_work_ops slow_work_new_thread_ops = {
- .get_ref = slow_work_new_thread_get_ref,
- .put_ref = slow_work_new_thread_put_ref,
+ .owner = THIS_MODULE,
.execute = slow_work_new_thread_execute,
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ .desc = slow_work_new_thread_desc,
+#endif
};
/*
@@ -546,12 +894,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
/**
* slow_work_register_user - Register a user of the facility
+ * @module: The module about to make use of the facility
*
* Register a user of the facility, starting up the initial threads if there
* aren't any other users at this point. This will return 0 if successful, or
* an error if not.
*/
-int slow_work_register_user(void)
+int slow_work_register_user(struct module *module)
{
struct task_struct *p;
int loop;
@@ -598,14 +947,81 @@ error:
}
EXPORT_SYMBOL(slow_work_register_user);
+/*
+ * wait for all outstanding items from the calling module to complete
+ * - note that more items may be queued whilst we're waiting
+ */
+static void slow_work_wait_for_items(struct module *module)
+{
+#ifdef CONFIG_MODULES
+ DECLARE_WAITQUEUE(myself, current);
+ struct slow_work *work;
+ int loop;
+
+ mutex_lock(&slow_work_unreg_sync_lock);
+ add_wait_queue(&slow_work_unreg_wq, &myself);
+
+ for (;;) {
+ spin_lock_irq(&slow_work_queue_lock);
+
+ /* first of all, we wait for the last queued item in each list
+ * to be processed */
+ list_for_each_entry_reverse(work, &vslow_work_queue, link) {
+ if (work->owner == module) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ slow_work_unreg_work_item = work;
+ goto do_wait;
+ }
+ }
+ list_for_each_entry_reverse(work, &slow_work_queue, link) {
+ if (work->owner == module) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ slow_work_unreg_work_item = work;
+ goto do_wait;
+ }
+ }
+
+ /* then we wait for the items being processed to finish */
+ slow_work_unreg_module = module;
+ smp_mb();
+ for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
+ if (slow_work_thread_processing[loop] == module)
+ goto do_wait;
+ }
+ spin_unlock_irq(&slow_work_queue_lock);
+ break; /* okay, we're done */
+
+ do_wait:
+ spin_unlock_irq(&slow_work_queue_lock);
+ schedule();
+ slow_work_unreg_work_item = NULL;
+ slow_work_unreg_module = NULL;
+ }
+
+ remove_wait_queue(&slow_work_unreg_wq, &myself);
+ mutex_unlock(&slow_work_unreg_sync_lock);
+#endif /* CONFIG_MODULES */
+}
+
/**
* slow_work_unregister_user - Unregister a user of the facility
+ * @module: The module whose items should be cleared
*
* Unregister a user of the facility, killing all the threads if this was the
* last one.
+ *
+ * This waits for all the work items belonging to the nominated module to go
+ * away before proceeding.
*/
-void slow_work_unregister_user(void)
+void slow_work_unregister_user(struct module *module)
{
+ /* first of all, wait for all outstanding items from the calling module
+ * to complete */
+ if (module)
+ slow_work_wait_for_items(module);
+
+ /* then we can actually go about shutting down the facility if need
+ * be */
mutex_lock(&slow_work_user_lock);
BUG_ON(slow_work_user_count <= 0);
@@ -639,6 +1055,16 @@ static int __init init_slow_work(void)
if (slow_work_max_max_threads < nr_cpus * 2)
slow_work_max_max_threads = nr_cpus * 2;
#endif
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ {
+ struct dentry *dbdir;
+
+ dbdir = debugfs_create_dir("slow_work", NULL);
+ if (dbdir && !IS_ERR(dbdir))
+ debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
+ NULL, &slow_work_runqueue_fops);
+ }
+#endif
return 0;
}
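Taken together, the changes to this file give modules roughly the following lifetime pattern; my_work, my_execute() and my_desc() are illustrative, and slow_work_init() is the existing initialiser from <linux/slow_work.h>. Note that get_ref/put_ref may now be omitted, and .owner is what lets unregistration wait for a module's outstanding items:

static const struct slow_work_ops my_ops = {
	.owner	 = THIS_MODULE,
	.execute = my_execute,
#ifdef CONFIG_SLOW_WORK_DEBUG
	.desc	 = my_desc,
#endif
};
static struct slow_work my_work;

static int __init my_init(void)
{
	int ret = slow_work_register_user(THIS_MODULE);

	if (ret < 0)
		return ret;
	slow_work_init(&my_work, &my_ops);
	return slow_work_enqueue(&my_work);
}

static void __exit my_exit(void)
{
	slow_work_cancel(&my_work);
	slow_work_unregister_user(THIS_MODULE);	/* waits for our items */
}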
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..321f3c59d732
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
+/* Slow work private definitions
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
+ * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
+ * OOM */
+
+#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
+
+/*
+ * slow-work.c
+ */
+#ifdef CONFIG_SLOW_WORK_DEBUG
+extern struct slow_work *slow_work_execs[];
+extern pid_t slow_work_pids[];
+extern rwlock_t slow_work_execs_lock;
+#endif
+
+extern struct list_head slow_work_queue;
+extern struct list_head vslow_work_queue;
+extern spinlock_t slow_work_queue_lock;
+
+/*
+ * slow-work-debugfs.c
+ */
+#ifdef CONFIG_SLOW_WORK_DEBUG
+extern const struct file_operations slow_work_runqueue_fops;
+
+extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
+#endif
+
+/*
+ * Helper functions
+ */
+static inline void slow_work_set_thread_pid(int id, pid_t pid)
+{
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ slow_work_pids[id] = pid;
+#endif
+}
+
+static inline void slow_work_mark_time(struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ work->mark = CURRENT_TIME;
+#endif
+}
+
+static inline void slow_work_begin_exec(int id, struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ slow_work_execs[id] = work;
+#endif
+}
+
+static inline void slow_work_end_exec(int id, struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ write_lock(&slow_work_execs_lock);
+ slow_work_execs[id] = NULL;
+ write_unlock(&slow_work_execs_lock);
+#endif
+}
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * Returns 0 on success, else a negative status code.
*/
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
+/*
+ * smp_call_function_any - Run a function on any of the given cpus
+ * @mask: The mask of cpus it can run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait until function has completed.
+ *
+ * Returns 0 on success, else a negative status code (if no cpus were online).
+ * Note that @wait will be implicitly turned on in case of allocation failures,
+ * since we fall back to on-stack allocation.
+ *
+ * Selection preference:
+ * 1) current cpu if in @mask
+ * 2) any cpu of current node if in @mask
+ * 3) any other online cpu in @mask
+ */
+int smp_call_function_any(const struct cpumask *mask,
+ void (*func)(void *info), void *info, int wait)
+{
+ unsigned int cpu;
+ const struct cpumask *nodemask;
+ int ret;
+
+ /* Try for same CPU (cheapest) */
+ cpu = get_cpu();
+ if (cpumask_test_cpu(cpu, mask))
+ goto call;
+
+ /* Try for same node. */
+ nodemask = cpumask_of_node(cpu);
+ for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
+ cpu = cpumask_next_and(cpu, nodemask, mask)) {
+ if (cpu_online(cpu))
+ goto call;
+ }
+
+ /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+call:
+ ret = smp_call_function_single(cpu, func, info, wait);
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_any);
+
/**
* __smp_call_function_single(): Run a function on another CPU
* @cpu: The CPU to run on.
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
* @wait: If true, wait (atomically) until function has completed
* on other CPUs.
*
- * If @wait is true, then returns once @func has returned. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * If @wait is true, then returns once @func has returned.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler. Preemption
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many);
* Returns 0.
*
* If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func. In case of allocation
- * failure, @wait will be implicitly turned on.
+ * it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
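A brief sketch of calling the new smp_call_function_any() helper; sample_one_cpu_of() and where_it_ran are illustrative. The callback runs with interrupts disabled on whichever CPU is chosen, so it must be fast and non-blocking:

static void note_cpu(void *info)
{
	*(unsigned int *)info = smp_processor_id();
}

static int sample_one_cpu_of(const struct cpumask *mask)
{
	unsigned int where_it_ran = 0;
	int ret;

	ret = smp_call_function_any(mask, note_cpu, &where_it_ran, 1);
	if (ret)
		return ret;	/* e.g. no online CPU in @mask */
	pr_info("callback ran on cpu %u\n", where_it_ran);
	return 0;
}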
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8749e5216e0..21939d9e830e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -302,9 +302,9 @@ void irq_exit(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
+ rcu_irq_exit();
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
- rcu_irq_exit();
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
tick_nohz_stop_sched_tick(0);
#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
-#ifndef _spin_trylock
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
- return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-#endif
-
-#ifndef _read_trylock
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
- return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef _write_trylock
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
- return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
-#endif
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
-#ifndef _read_lock
-void __lockfunc _read_lock(rwlock_t *lock)
-{
- __read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-#endif
-
-#ifndef _spin_lock_irqsave
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
- return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-#endif
-
-#ifndef _spin_lock_irq
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
- __spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-#endif
-
-#ifndef _spin_lock_bh
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
- __spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-#endif
-
-#ifndef _read_lock_irqsave
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
- return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef _read_lock_irq
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
- __read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-#endif
-
-#ifndef _read_lock_bh
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
- __read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-#endif
-
-#ifndef _write_lock_irqsave
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
- return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-#endif
-
-#ifndef _write_lock_irq
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
- __write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-#endif
-
-#ifndef _write_lock_bh
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
- __write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-#endif
-
-#ifndef _spin_lock
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
- __spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-#endif
-
-#ifndef _write_lock
-void __lockfunc _write_lock(rwlock_t *lock)
-{
- __write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
-#endif
-
-#else /* CONFIG_PREEMPT: */
-
/*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embedds them into the calling _lock_function below.
+ *
* This could be a long-held lock. We both prepare to spin for a long
* time (making _this_ CPU preemptable if possible), and we also signal
* towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
*/
-
#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc _##op##_lock(locktype##_t *lock) \
+void __lockfunc __##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
(lock)->break_lock = 0; \
} \
\
-EXPORT_SYMBOL(_##op##_lock); \
- \
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
return flags; \
} \
\
-EXPORT_SYMBOL(_##op##_lock_irqsave); \
- \
-void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
+void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
{ \
_##op##_lock_irqsave(lock); \
} \
\
-EXPORT_SYMBOL(_##op##_lock_irq); \
- \
-void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
+void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
local_bh_disable(); \
local_irq_restore(flags); \
} \
- \
-EXPORT_SYMBOL(_##op##_lock_bh)
/*
* Build preemption-friendly versions of the following
* lock-spinning functions:
*
- * _[spin|read|write]_lock()
- * _[spin|read|write]_lock_irq()
- * _[spin|read|write]_lock_irqsave()
- * _[spin|read|write]_lock_bh()
+ * __[spin|read|write]_lock()
+ * __[spin|read|write]_lock_irq()
+ * __[spin|read|write]_lock_irqsave()
+ * __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
-#endif /* CONFIG_PREEMPT */
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
}
EXPORT_SYMBOL(_spin_lock_nested);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+ int subclass)
{
unsigned long flags;
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
-#ifndef _spin_unlock
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _spin_trylock(spinlock_t *lock)
+{
+ return __spin_trylock(lock);
+}
+EXPORT_SYMBOL(_spin_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _read_trylock(rwlock_t *lock)
+{
+ return __read_trylock(lock);
+}
+EXPORT_SYMBOL(_read_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+ return __write_trylock(lock);
+}
+EXPORT_SYMBOL(_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _read_lock(rwlock_t *lock)
+{
+ __read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+{
+ return __spin_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _spin_lock_irq(spinlock_t *lock)
+{
+ __spin_lock_irq(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _spin_lock_bh(spinlock_t *lock)
+{
+ __spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+{
+ return __read_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _read_lock_irq(rwlock_t *lock)
+{
+ __read_lock_irq(lock);
+}
+EXPORT_SYMBOL(_read_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _read_lock_bh(rwlock_t *lock)
+{
+ __read_lock_bh(lock);
+}
+EXPORT_SYMBOL(_read_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+ return __write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+ __write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+ __write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _spin_lock(spinlock_t *lock)
+{
+ __spin_lock(lock);
+}
+EXPORT_SYMBOL(_spin_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+ __write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
void __lockfunc _spin_unlock(spinlock_t *lock)
{
__spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock);
#endif
-#ifndef _write_unlock
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _write_unlock(rwlock_t *lock)
{
__write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock);
#endif
-#ifndef _read_unlock
+#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _read_unlock(rwlock_t *lock)
{
__read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock);
#endif
-#ifndef _spin_unlock_irqrestore
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
__spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif
-#ifndef _spin_unlock_irq
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
__spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock_irq);
#endif
-#ifndef _spin_unlock_bh
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
__spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock_bh);
#endif
-#ifndef _read_unlock_irqrestore
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_read_unlock_irqrestore);
#endif
-#ifndef _read_unlock_irq
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
__read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock_irq);
#endif
-#ifndef _read_unlock_bh
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
__read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock_bh);
#endif
-#ifndef _write_unlock_irqrestore
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_write_unlock_irqrestore);
#endif
-#ifndef _write_unlock_irq
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
__write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock_irq);
#endif
-#ifndef _write_unlock_bh
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
__write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock_bh);
#endif
-#ifndef _spin_trylock_bh
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
return __spin_trylock_bh(lock);
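For context, the CONFIG_INLINE_* switches tested above are consumed in include/linux/spinlock_api_smp.h; the pattern is roughly the sketch below, not a verbatim copy. When the option is set, callers pick up the inline __spin_lock() directly; otherwise they call the out-of-line _spin_lock() kept in this file:

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif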
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..818d7d9aa03c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
return (sp->per_cpu_ref ? 0 : -ENOMEM);
}
+EXPORT_SYMBOL_GPL(init_srcu_struct);
/*
* srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
free_percpu(sp->per_cpu_ref);
sp->per_cpu_ref = NULL;
}
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
preempt_enable();
return idx;
}
+EXPORT_SYMBOL_GPL(srcu_read_lock);
/**
* srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
preempt_enable();
}
+EXPORT_SYMBOL_GPL(srcu_read_unlock);
-/**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
- *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates. Can block; must be called from
- * process context.
- *
- * Note that it is illegal to call synchornize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section.
+/*
+ * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
int idx;
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
return;
}
- synchronize_sched(); /* Force memory barrier on all CPUs. */
+ sync_func(); /* Force memory barrier on all CPUs. */
/*
* The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
idx = sp->completed & 0x1;
sp->completed++;
- synchronize_sched(); /* Force memory barrier on all CPUs. */
+ sync_func(); /* Force memory barrier on all CPUs. */
/*
* At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
while (srcu_readers_active_idx(sp, idx))
schedule_timeout_interruptible(1);
- synchronize_sched(); /* Force memory barrier on all CPUs. */
+ sync_func(); /* Force memory barrier on all CPUs. */
/*
* The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp)
}
/**
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates. Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu() from the corresponding
+ * SRCU read-side critical section; doing so will result in deadlock.
+ * However, it is perfectly legal to call synchronize_srcu() on one
+ * srcu_struct from some other srcu_struct's read-side critical section.
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+ __synchronize_srcu(sp, synchronize_sched);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
+
+/**
+ * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates. Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section; doing so
+ * will result in deadlock. However, it is perfectly legal to call
+ * synchronize_srcu_expedited() on one srcu_struct from some other
+ * srcu_struct's read-side critical section.
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ __synchronize_srcu(sp, synchronize_sched_expedited);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
* srcu_batches_completed - return batches completed.
* @sp: srcu_struct on which to report batch completion.
*
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
{
return sp->completed;
}
-
-EXPORT_SYMBOL_GPL(init_srcu_struct);
-EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
-EXPORT_SYMBOL_GPL(srcu_read_lock);
-EXPORT_SYMBOL_GPL(srcu_read_unlock);
-EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
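With these symbols exported, modules can use the usual SRCU pattern directly, including the new expedited grace period; my_srcu and the reader/writer helpers below are an illustrative sketch:

static struct srcu_struct my_srcu;	/* init_srcu_struct() at setup,
					 * cleanup_srcu_struct() at teardown */

static void my_reader(void)
{
	int idx = srcu_read_lock(&my_srcu);
	/* ... dereference SRCU-protected pointers here ... */
	srcu_read_unlock(&my_srcu, idx);
}

static void my_writer_sync(void)
{
	/* like synchronize_srcu(), but trades CPU time for lower latency */
	synchronize_srcu_expedited(&my_srcu);
}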
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
err = session;
out:
write_unlock_irq(&tasklist_lock);
+ if (err > 0)
+ proc_sid_connector(group_leader);
return err;
}
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
if (arg4 | arg5)
return -EINVAL;
switch (arg2) {
- case 0:
+ case PR_MCE_KILL_CLEAR:
if (arg3 != 0)
return -EINVAL;
current->flags &= ~PF_MCE_PROCESS;
break;
- case 1:
+ case PR_MCE_KILL_SET:
current->flags |= PF_MCE_PROCESS;
- if (arg3 != 0)
+ if (arg3 == PR_MCE_KILL_EARLY)
current->flags |= PF_MCE_EARLY;
- else
+ else if (arg3 == PR_MCE_KILL_LATE)
current->flags &= ~PF_MCE_EARLY;
+ else if (arg3 == PR_MCE_KILL_DEFAULT)
+ current->flags &=
+ ~(PF_MCE_EARLY|PF_MCE_PROCESS);
+ else
+ return -EINVAL;
break;
default:
return -EINVAL;
}
error = 0;
break;
-
+ case PR_MCE_KILL_GET:
+ if (arg2 | arg3 | arg4 | arg5)
+ return -EINVAL;
+ if (current->flags & PF_MCE_PROCESS)
+ error = (current->flags & PF_MCE_EARLY) ?
+ PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+ else
+ error = PR_MCE_KILL_DEFAULT;
+ break;
default:
error = -EINVAL;
break;
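From userspace, the extended interface above can be exercised roughly as follows (a sketch; the PR_MCE_KILL* constants come from <linux/prctl.h> and require headers new enough to carry them):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* request early (synchronous) machine-check kills for this process */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0) == -1)
		perror("PR_MCE_KILL");

	/* PR_MCE_KILL_GET returns the current per-process policy */
	printf("policy = %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
	return 0;
}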
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d949c517412..4dbf93a52ee9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -36,6 +36,7 @@
#include <linux/sysrq.h>
#include <linux/highuid.h>
#include <linux/writeback.h>
+#include <linux/ratelimit.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/key.h>
@@ -158,6 +159,8 @@ extern int no_unaligned_warning;
extern int unaligned_dump_stack;
#endif
+extern struct ratelimit_state printk_ratelimit_state;
+
#ifdef CONFIG_RT_MUTEXES
extern int max_lock_depth;
#endif
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
if (!table->ctl_name && table->strategy)
set_fail(&fail, table, "Strategy without ctl_name");
#endif
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_SYSCTL
if (table->procname && !table->proc_handler)
set_fail(&fail, table, "No proc_handler");
#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e33265..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -740,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
out:
mutex_unlock(&ftrace_profile_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
ret = ftrace_process_regex(parser->buffer,
parser->idx, enable);
if (ret)
- goto out;
+ goto out_unlock;
trace_parser_clear(parser);
}
ret = read;
-
+out_unlock:
mutex_unlock(&ftrace_regex_lock);
-out:
+
return ret;
}
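The f_pos fixes in this file follow the general rule for ->write() handlers: advance the position through the ppos argument the VFS passes in rather than poking filp->f_pos. A minimal handler following that rule (illustrative only):

static ssize_t my_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	/* ... consume cnt bytes from ubuf ... */
	*ppos += cnt;
	return cnt;
}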
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
u64 time;
preempt_disable_notrace();
- time = rb_time_stamp(buffer, cpu);
+ time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
}
/*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
+ spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
return;
rb_reset_cpu(cpu_buffer);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
rb_check_pages(cpu_buffer);
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+ *ts = rb_time_stamp(buffer);
next_page->page->time_stamp = *ts;
}
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
- ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+ ts = rb_time_stamp(cpu_buffer->buffer);
/*
* Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c820b0310a12..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return ret;
}
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
}
mutex_unlock(&trace_types_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
if (err)
return err;
- filp->f_pos += ret;
+ *ppos += ret;
return ret;
}
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
}
}
- filp->f_pos += cnt;
+ *ppos += cnt;
/* If check pages failed, return ENOMEM */
if (tracing_disabled)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed17565826b0..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
* @s: trace sequence descriptor
* @fmt: printf format string
*
+ * It returns 0 if the trace would overrun the buffer's free
+ * space, 1 otherwise.
+ *
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formatting of a trace
* trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
s->len += ret;
- return len;
+ return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
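With the change above, trace_seq_printf() reports success or failure rather than a length, so output code can bail out cleanly when the sequence buffer is full. A sketch of a typical caller (the signature is simplified; real event printers take extra arguments):

static enum print_line_t my_event_print(struct trace_iterator *iter)
{
	if (!trace_seq_printf(&iter->seq, "my event\n"))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}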
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
*/
static void free_user(struct user_struct *up, unsigned long flags)
{
- spin_unlock_irqrestore(&uidhash_lock, flags);
INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+ spin_unlock_irqrestore(&uidhash_lock, flags);
}
#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
int schedule_on_each_cpu(work_func_t func)
{
int cpu;
+ int orig = -1;
struct work_struct *works;
works = alloc_percpu(struct work_struct);
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
return -ENOMEM;
get_online_cpus();
+
+ /*
+ * When running in keventd don't schedule a work item on
+ * itself. Can just call directly because the work queue is
+ * already bound. This also is faster.
+ */
+ if (current_is_keventd())
+ orig = raw_smp_processor_id();
+
for_each_online_cpu(cpu) {
struct work_struct *work = per_cpu_ptr(works, cpu);
INIT_WORK(work, func);
- schedule_work_on(cpu, work);
+ if (cpu != orig)
+ schedule_work_on(cpu, work);
}
+ if (orig >= 0)
+ func(per_cpu_ptr(works, orig));
+
for_each_online_cpu(cpu)
flush_work(per_cpu_ptr(works, cpu));
+
put_online_cpus();
free_percpu(works);
return 0;
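A small sketch of a schedule_on_each_cpu() caller; with the change above, when invoked from keventd the local CPU's callback now runs synchronously instead of being queued back onto the same worker thread. say_hello() is illustrative:

static void say_hello(struct work_struct *unused)
{
	pr_info("hello from CPU %d\n", raw_smp_processor_id());
}

static int greet_all_cpus(void)
{
	/* returns 0 on success, -ENOMEM if the per-CPU works can't be allocated */
	return schedule_on_each_cpu(say_hello);
}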