Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c         |  2
-rw-r--r--  kernel/fork.c           |  2
-rw-r--r--  kernel/hrtimer.c        | 70
-rw-r--r--  kernel/intermodule.c    |  3
-rw-r--r--  kernel/itimer.c         | 11
-rw-r--r--  kernel/kprobes.c        | 36
-rw-r--r--  kernel/module.c         |  3
-rw-r--r--  kernel/posix-timers.c   | 53
-rw-r--r--  kernel/power/console.c  | 12
-rw-r--r--  kernel/power/disk.c     | 15
-rw-r--r--  kernel/power/main.c     |  4
-rw-r--r--  kernel/power/power.h    |  9
-rw-r--r--  kernel/power/swsusp.c   |  8
-rw-r--r--  kernel/rcutorture.c     | 10
-rw-r--r--  kernel/sched.c          | 24
-rw-r--r--  kernel/signal.c         |  2
-rw-r--r--  kernel/sys.c            | 25
-rw-r--r--  kernel/sysctl.c         | 12
-rw-r--r--  kernel/time.c           | 15
-rw-r--r--  kernel/user.c           | 32
20 files changed, 177 insertions(+), 171 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index fe2f71f92ae0..ba42b0a76961 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -641,7 +641,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* task has been modifying its cpuset.
*/
-void cpuset_update_task_memory_state()
+void cpuset_update_task_memory_state(void)
{
int my_cpusets_mem_gen;
struct task_struct *tsk = current;
diff --git a/kernel/fork.c b/kernel/fork.c
index 4ae8cfc1c89c..7f0ab5ee948c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,7 +802,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
- hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC);
+ hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
sig->it_real_incr.tv64 = 0;
sig->real_timer.function = it_real_fn;
sig->real_timer.data = tsk;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f1c4155b49ac..2b6e1757aedd 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -21,6 +21,12 @@
* Credits:
* based on kernel/timer.c
*
+ * Help, testing, suggestions, bugfixes, improvements were
+ * provided by:
+ *
+ * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
+ * et. al.
+ *
* For licencing details see kernel-base/COPYING
*/
@@ -66,6 +72,12 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
/*
* The timer bases:
+ *
+ * Note: If we want to add new timer bases, we have to skip the two
+ * clock ids captured by the cpu-timers. We do this by holding empty
+ * entries rather than doing math adjustment of the clock ids.
+ * This ensures that we capture erroneous accesses to these clock ids
+ * rather than moving them into the range of valid clock id's.
*/
#define MAX_HRTIMER_BASES 2
@@ -483,29 +495,25 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
}
/**
- * hrtimer_rebase - rebase an initialized hrtimer to a different base
+ * hrtimer_init - initialize a timer to the given clock
*
- * @timer: the timer to be rebased
+ * @timer: the timer to be initialized
* @clock_id: the clock to be used
+ * @mode: timer mode abs/rel
*/
-void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
{
struct hrtimer_base *bases;
+ memset(timer, 0, sizeof(struct hrtimer));
+
bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
- timer->base = &bases[clock_id];
-}
-/**
- * hrtimer_init - initialize a timer to the given clock
- *
- * @timer: the timer to be initialized
- * @clock_id: the clock to be used
- */
-void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
-{
- memset(timer, 0, sizeof(struct hrtimer));
- hrtimer_rebase(timer, clock_id);
+ if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
+ clock_id = CLOCK_MONOTONIC;
+
+ timer->base = &bases[clock_id];
}
/**
@@ -550,6 +558,7 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
fn = timer->function;
data = timer->data;
set_curr_timer(base, timer);
+ timer->state = HRTIMER_RUNNING;
__remove_hrtimer(timer, base);
spin_unlock_irq(&base->lock);
@@ -565,6 +574,10 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
spin_lock_irq(&base->lock);
+ /* Another CPU has added back the timer */
+ if (timer->state != HRTIMER_RUNNING)
+ continue;
+
if (restart == HRTIMER_RESTART)
enqueue_hrtimer(timer, base);
else
@@ -638,8 +651,7 @@ schedule_hrtimer_interruptible(struct hrtimer *timer,
return schedule_hrtimer(timer, mode);
}
-static long __sched
-nanosleep_restart(struct restart_block *restart, clockid_t clockid)
+static long __sched nanosleep_restart(struct restart_block *restart)
{
struct timespec __user *rmtp;
struct timespec tu;
@@ -649,7 +661,7 @@ nanosleep_restart(struct restart_block *restart, clockid_t clockid)
restart->fn = do_no_restart_syscall;
- hrtimer_init(&timer, clockid);
+ hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS);
timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
@@ -669,16 +681,6 @@ nanosleep_restart(struct restart_block *restart, clockid_t clockid)
return -ERESTART_RESTARTBLOCK;
}
-static long __sched nanosleep_restart_mono(struct restart_block *restart)
-{
- return nanosleep_restart(restart, CLOCK_MONOTONIC);
-}
-
-static long __sched nanosleep_restart_real(struct restart_block *restart)
-{
- return nanosleep_restart(restart, CLOCK_REALTIME);
-}
-
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
@@ -687,7 +689,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
struct timespec tu;
ktime_t rem;
- hrtimer_init(&timer, clockid);
+ hrtimer_init(&timer, clockid, mode);
timer.expires = timespec_to_ktime(*rqtp);
@@ -695,7 +697,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
if (rem.tv64 <= 0)
return 0;
- /* Absolute timers do not update the rmtp value: */
+ /* Absolute timers do not update the rmtp value and restart: */
if (mode == HRTIMER_ABS)
return -ERESTARTNOHAND;
@@ -705,11 +707,11 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
return -EFAULT;
restart = &current_thread_info()->restart_block;
- restart->fn = (clockid == CLOCK_MONOTONIC) ?
- nanosleep_restart_mono : nanosleep_restart_real;
+ restart->fn = nanosleep_restart;
restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
restart->arg1 = timer.expires.tv64 >> 32;
restart->arg2 = (unsigned long) rmtp;
+ restart->arg3 = (unsigned long) timer.base->index;
return -ERESTART_RESTARTBLOCK;
}
@@ -736,10 +738,8 @@ static void __devinit init_hrtimers_cpu(int cpu)
struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
int i;
- for (i = 0; i < MAX_HRTIMER_BASES; i++) {
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
spin_lock_init(&base->lock);
- base++;
- }
}
#ifdef CONFIG_HOTPLUG_CPU
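
The nanosleep changes above fold the per-clock restart helpers into a single nanosleep_restart() by stashing the clock index in restart->arg3, next to the 64-bit expiry that was already split across arg0/arg1. The userspace sketch below (not part of the patch; fake_restart_block and its fields are hypothetical stand-ins for the kernel's restart_block) shows just that pack/unpack step.

/* Illustrative userspace sketch: the restart block only carries
 * unsigned longs, so the 64-bit expiry is split into two 32-bit
 * halves (arg0/arg1) and reassembled on restart, while arg3 carries
 * the clock/base index. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_restart_block {
        unsigned long arg0, arg1, arg2, arg3;
};

int main(void)
{
        uint64_t expires = 0x123456789abcdef0ULL;  /* expiry in ns (ktime) */
        struct fake_restart_block rb;

        /* pack: low word in arg0, high word in arg1, clock index in arg3 */
        rb.arg0 = expires & 0xFFFFFFFF;
        rb.arg1 = expires >> 32;
        rb.arg3 = 1;                               /* e.g. the monotonic base */

        /* unpack exactly as nanosleep_restart() does above */
        uint64_t again = ((uint64_t)rb.arg1 << 32) | (uint64_t)rb.arg0;
        assert(again == expires);
        printf("expires restored: %#llx, clock index %lu\n",
               (unsigned long long)again, rb.arg3);
        return 0;
}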
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 0cbe633420fb..55b1e5b85db9 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -179,3 +179,6 @@ EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/itimer.c b/kernel/itimer.c
index c2c05c4ff28d..379be2f8c84c 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -49,9 +49,11 @@ int do_getitimer(int which, struct itimerval *value)
switch (which) {
case ITIMER_REAL:
+ spin_lock_irq(&tsk->sighand->siglock);
value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
value->it_interval =
ktime_to_timeval(tsk->signal->it_real_incr);
+ spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
read_lock(&tasklist_lock);
@@ -150,18 +152,25 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
switch (which) {
case ITIMER_REAL:
+again:
+ spin_lock_irq(&tsk->sighand->siglock);
timer = &tsk->signal->real_timer;
- hrtimer_cancel(timer);
if (ovalue) {
ovalue->it_value = itimer_get_remtime(timer);
ovalue->it_interval
= ktime_to_timeval(tsk->signal->it_real_incr);
}
+ /* We are sharing ->siglock with it_real_fn() */
+ if (hrtimer_try_to_cancel(timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
+ goto again;
+ }
tsk->signal->it_real_incr =
timeval_to_ktime(value->it_interval);
expires = timeval_to_ktime(value->it_value);
if (expires.tv64 != 0)
hrtimer_start(timer, expires, HRTIMER_REL);
+ spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
nval = timeval_to_cputime(&value->it_value);
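
do_setitimer() now shares ->siglock with it_real_fn(), so it cannot wait for a running callback while holding the lock; instead hrtimer_try_to_cancel() reports failure and the code drops the lock and retries. A rough pthreads sketch of that drop-and-retry shape follows; it is only an illustration under simplified assumptions, not kernel code, and every name in it is made up.

/* Illustrative sketch of the "drop the lock and retry" pattern: if the
 * timer callback is currently running and that callback takes the same
 * lock, cancelling while holding the lock would deadlock, so release it
 * and try again. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int callback_running;        /* 1 while the "timer" fires */

/* returns 1 if cancellation is safe, -1 if the callback is mid-flight */
static int try_to_cancel(void)
{
        return atomic_load(&callback_running) ? -1 : 1;
}

static void *timer_fn(void *arg)
{
        atomic_store(&callback_running, 1);
        pthread_mutex_lock(&siglock);      /* callback shares the lock */
        puts("timer fired");
        pthread_mutex_unlock(&siglock);
        atomic_store(&callback_running, 0);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, timer_fn, NULL);

again:
        pthread_mutex_lock(&siglock);
        if (try_to_cancel() < 0) {
                /* cannot cancel while it runs: back off, let it finish */
                pthread_mutex_unlock(&siglock);
                goto again;
        }
        puts("timer cancelled or idle");
        pthread_mutex_unlock(&siglock);
        pthread_join(t, NULL);
        return 0;
}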
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3ea6325228da..fef1af8a73ce 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -344,23 +344,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
spin_unlock_irqrestore(&kretprobe_lock, flags);
}
-/*
- * This kprobe pre_handler is registered with every kretprobe. When probe
- * hits it will set up the return probe.
- */
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
- struct pt_regs *regs)
-{
- struct kretprobe *rp = container_of(p, struct kretprobe, kp);
- unsigned long flags = 0;
-
- /*TODO: consider to only swap the RA after the last pre_handler fired */
- spin_lock_irqsave(&kretprobe_lock, flags);
- arch_prepare_kretprobe(rp, regs);
- spin_unlock_irqrestore(&kretprobe_lock, flags);
- return 0;
-}
-
static inline void free_rp_inst(struct kretprobe *rp)
{
struct kretprobe_instance *ri;
@@ -578,6 +561,23 @@ void __kprobes unregister_jprobe(struct jprobe *jp)
#ifdef ARCH_SUPPORTS_KRETPROBES
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+ unsigned long flags = 0;
+
+ /*TODO: consider to only swap the RA after the last pre_handler fired */
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ arch_prepare_kretprobe(rp, regs);
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ return 0;
+}
+
int __kprobes register_kretprobe(struct kretprobe *rp)
{
int ret = 0;
@@ -631,12 +631,12 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
unregister_kprobe(&rp->kp);
/* No race here */
spin_lock_irqsave(&kretprobe_lock, flags);
- free_rp_inst(rp);
while ((ri = get_used_rp_inst(rp)) != NULL) {
ri->rp = NULL;
hlist_del(&ri->uflist);
}
spin_unlock_irqrestore(&kretprobe_lock, flags);
+ free_rp_inst(rp);
}
static int __init init_kprobes(void)
diff --git a/kernel/module.c b/kernel/module.c
index 618ed6e23ecc..e058aedf6b93 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2092,7 +2092,8 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
unsigned int i;
for (i = 0; i < mod->num_symtab; i++)
- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0)
+ if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
+ mod->symtab[i].st_info != 'U')
return mod->symtab[i].st_value;
return 0;
}
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 197208b3aa2a..216f574b5ffb 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -194,9 +194,7 @@ static inline int common_clock_set(const clockid_t which_clock,
static int common_timer_create(struct k_itimer *new_timer)
{
- hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
- new_timer->it.real.timer.data = new_timer;
- new_timer->it.real.timer.function = posix_timer_fn;
+ hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
return 0;
}
@@ -290,7 +288,8 @@ void do_schedule_next_timer(struct siginfo *info)
info->si_overrun = timr->it_overrun_last;
}
- unlock_timer(timr, flags);
+ if (timr)
+ unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr,int si_private)
@@ -692,6 +691,7 @@ common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting, struct itimerspec *old_setting)
{
struct hrtimer *timer = &timr->it.real.timer;
+ enum hrtimer_mode mode;
if (old_setting)
common_timer_get(timr, old_setting);
@@ -713,14 +713,10 @@ common_timer_set(struct k_itimer *timr, int flags,
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
return 0;
- /* Posix madness. Only absolute CLOCK_REALTIME timers
- * are affected by clock sets. So we must reiniatilize
- * the timer.
- */
- if (timr->it_clock == CLOCK_REALTIME && (flags & TIMER_ABSTIME))
- hrtimer_rebase(timer, CLOCK_REALTIME);
- else
- hrtimer_rebase(timer, CLOCK_MONOTONIC);
+ mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
+ hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
+ timr->it.real.timer.data = timr;
+ timr->it.real.timer.function = posix_timer_fn;
timer->expires = timespec_to_ktime(new_setting->it_value);
@@ -728,11 +724,15 @@ common_timer_set(struct k_itimer *timr, int flags,
timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
/* SIGEV_NONE timers are not queued ! See common_timer_get */
- if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+ if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
+ /* Setup correct expiry time for relative timers */
+ if (mode == HRTIMER_REL)
+ timer->expires = ktime_add(timer->expires,
+ timer->base->get_time());
return 0;
+ }
- hrtimer_start(timer, timer->expires, (flags & TIMER_ABSTIME) ?
- HRTIMER_ABS : HRTIMER_REL);
+ hrtimer_start(timer, timer->expires, mode);
return 0;
}
@@ -875,12 +875,6 @@ int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
-int do_posix_clock_notimer_create(struct k_itimer *timer)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);
-
int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
struct timespec *t, struct timespec __user *r)
{
@@ -947,21 +941,8 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
static int common_nsleep(const clockid_t which_clock, int flags,
struct timespec *tsave, struct timespec __user *rmtp)
{
- int mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
- int clockid = which_clock;
-
- switch (which_clock) {
- case CLOCK_REALTIME:
- /* Posix madness. Only absolute timers on clock realtime
- are affected by clock set. */
- if (mode != HRTIMER_ABS)
- clockid = CLOCK_MONOTONIC;
- case CLOCK_MONOTONIC:
- break;
- default:
- return -EINVAL;
- }
- return hrtimer_nanosleep(tsave, rmtp, mode, clockid);
+ return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
+ HRTIMER_ABS : HRTIMER_REL, which_clock);
}
asmlinkage long
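
For SIGEV_NONE timers the patch keeps the timer unqueued but still converts a relative expiry into an absolute one by adding the clock's current time, so common_timer_get() can later report the remaining time. A minimal userspace sketch of that add-now step, using POSIX clock_gettime() as a stand-in for base->get_time() (the helper name here is made up):

/* Illustrative sketch (not kernel code): store an absolute expiry for a
 * relative timer by adding "now" from the timer's clock, mirroring the
 * ktime_add(timer->expires, timer->base->get_time()) step above. */
#include <stdio.h>
#include <time.h>

static struct timespec timespec_add(struct timespec a, struct timespec b)
{
        struct timespec r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

        if (r.tv_nsec >= 1000000000L) {    /* normalize the sum */
                r.tv_nsec -= 1000000000L;
                r.tv_sec++;
        }
        return r;
}

int main(void)
{
        struct timespec now, rel = { 2, 500000000L };   /* 2.5 s from now */

        clock_gettime(CLOCK_MONOTONIC, &now);
        struct timespec abs_expiry = timespec_add(now, rel);

        printf("now=%ld.%09ld  expires=%ld.%09ld\n",
               (long)now.tv_sec, now.tv_nsec,
               (long)abs_expiry.tv_sec, abs_expiry.tv_nsec);
        return 0;
}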
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 7ff375e7c95f..579d239d129f 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,18 +9,11 @@
#include <linux/console.h>
#include "power.h"
-static int new_loglevel = 10;
-static int orig_loglevel;
#ifdef SUSPEND_CONSOLE
static int orig_fgconsole, orig_kmsg;
-#endif
int pm_prepare_console(void)
{
- orig_loglevel = console_loglevel;
- console_loglevel = new_loglevel;
-
-#ifdef SUSPEND_CONSOLE
acquire_console_sem();
orig_fgconsole = fg_console;
@@ -41,18 +34,15 @@ int pm_prepare_console(void)
}
orig_kmsg = kmsg_redirect;
kmsg_redirect = SUSPEND_CONSOLE;
-#endif
return 0;
}
void pm_restore_console(void)
{
- console_loglevel = orig_loglevel;
-#ifdef SUSPEND_CONSOLE
acquire_console_sem();
set_console(orig_fgconsole);
release_console_sem();
kmsg_redirect = orig_kmsg;
-#endif
return;
}
+#endif
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index e24446f8d8cd..0b43847dc980 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -53,7 +53,7 @@ static void power_down(suspend_disk_method_t mode)
switch(mode) {
case PM_DISK_PLATFORM:
- kernel_power_off_prepare();
+ kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
error = pm_ops->enter(PM_SUSPEND_DISK);
break;
case PM_DISK_SHUTDOWN:
@@ -95,13 +95,6 @@ static int prepare_processes(void)
goto thaw;
}
- if (pm_disk_mode == PM_DISK_PLATFORM) {
- if (pm_ops && pm_ops->prepare) {
- if ((error = pm_ops->prepare(PM_SUSPEND_DISK)))
- goto thaw;
- }
- }
-
/* Free memory before shutting down devices. */
if (!(error = swsusp_shrink_memory()))
return 0;
@@ -367,14 +360,14 @@ power_attr(resume);
static ssize_t image_size_show(struct subsystem * subsys, char *buf)
{
- return sprintf(buf, "%u\n", image_size);
+ return sprintf(buf, "%lu\n", image_size);
}
static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n)
{
- unsigned int size;
+ unsigned long size;
- if (sscanf(buf, "%u", &size) == 1) {
+ if (sscanf(buf, "%lu", &size) == 1) {
image_size = size;
return n;
}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index d253f3ae2fa5..9cb235cba4a9 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -133,10 +133,10 @@ static int suspend_enter(suspend_state_t state)
static void suspend_finish(suspend_state_t state)
{
device_resume();
- if (pm_ops && pm_ops->finish)
- pm_ops->finish(state);
thaw_processes();
enable_nonboot_cpus();
+ if (pm_ops && pm_ops->finish)
+ pm_ops->finish(state);
pm_restore_console();
}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 7e8492fd1423..d8f0d1a76bae 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -42,8 +42,13 @@ static struct subsys_attribute _name##_attr = { \
extern struct subsystem power_subsys;
+#ifdef SUSPEND_CONSOLE
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
+#else
+static int pm_prepare_console(void) { return 0; }
+static void pm_restore_console(void) {}
+#endif
/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
@@ -51,8 +56,8 @@ extern const void __nosave_begin, __nosave_end;
extern unsigned int nr_copy_pages;
extern struct pbe *pagedir_nosave;
-/* Preferred image size in MB (default 500) */
-extern unsigned int image_size;
+/* Preferred image size in bytes (default 500 MB) */
+extern unsigned long image_size;
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 55a18d26abed..59c91c148e82 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -70,12 +70,12 @@
#include "power.h"
/*
- * Preferred image size in MB (tunable via /sys/power/image_size).
+ * Preferred image size in bytes (tunable via /sys/power/image_size).
* When it is set to N, swsusp will do its best to ensure the image
- * size will not exceed N MB, but if that is impossible, it will
+ * size will not exceed N bytes, but if that is impossible, it will
* try to create the smallest image possible.
*/
-unsigned int image_size = 500;
+unsigned long image_size = 500 * 1024 * 1024;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void);
@@ -590,7 +590,7 @@ int swsusp_shrink_memory(void)
if (!tmp)
return -ENOMEM;
pages += tmp;
- } else if (size > (image_size * 1024 * 1024) / PAGE_SIZE) {
+ } else if (size > image_size / PAGE_SIZE) {
tmp = shrink_all_memory(SHRINK_BITE);
pages += tmp;
}
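
With image_size now expressed in bytes rather than megabytes, the shrink check above loses its "* 1024 * 1024". A tiny sanity check (assuming 4 KiB pages and the 500 MB default) that the old and new forms compute the same page threshold:

/* Illustrative check only: with image_size in bytes, the old and new
 * thresholds agree. Page size and the default are assumptions. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;
        unsigned int old_mb = 500;                      /* old unit: MB */
        unsigned long new_bytes = 500UL * 1024 * 1024;  /* new unit: bytes */

        unsigned long old_thresh = (old_mb * 1024UL * 1024) / page_size;
        unsigned long new_thresh = new_bytes / page_size;

        assert(old_thresh == new_thresh);
        printf("threshold: %lu pages either way\n", new_thresh);
        return 0;
}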
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 773219907dd8..7712912dbc84 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -114,16 +114,16 @@ rcu_torture_alloc(void)
{
struct list_head *p;
- spin_lock(&rcu_torture_lock);
+ spin_lock_bh(&rcu_torture_lock);
if (list_empty(&rcu_torture_freelist)) {
atomic_inc(&n_rcu_torture_alloc_fail);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
return NULL;
}
atomic_inc(&n_rcu_torture_alloc);
p = rcu_torture_freelist.next;
list_del_init(p);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
return container_of(p, struct rcu_torture, rtort_free);
}
@@ -134,9 +134,9 @@ static void
rcu_torture_free(struct rcu_torture *p)
{
atomic_inc(&n_rcu_torture_free);
- spin_lock(&rcu_torture_lock);
+ spin_lock_bh(&rcu_torture_lock);
list_add_tail(&p->rtort_free, &rcu_torture_freelist);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
}
static void
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ee2ae45125f..bc38804e40dd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4031,7 +4031,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
goto out_unlock;
retval = 0;
- cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
+ cpus_and(*mask, p->cpus_allowed, cpu_online_map);
out_unlock:
read_unlock(&tasklist_lock);
@@ -5141,7 +5141,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
#define SEARCH_SCOPE 2
#define MIN_CACHE_SIZE (64*1024U)
#define DEFAULT_CACHE_SIZE (5*1024*1024U)
-#define ITERATIONS 2
+#define ITERATIONS 1
#define SIZE_THRESH 130
#define COST_THRESH 130
@@ -5480,9 +5480,9 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
break;
}
/*
- * Increase the cachesize in 5% steps:
+ * Increase the cachesize in 10% steps:
*/
- size = size * 20 / 19;
+ size = size * 10 / 9;
}
if (migration_debug)
@@ -5551,13 +5551,15 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
-1
#endif
);
- printk("migration_cost=");
- for (distance = 0; distance <= max_distance; distance++) {
- if (distance)
- printk(",");
- printk("%ld", (long)migration_cost[distance] / 1000);
+ if (system_state == SYSTEM_BOOTING) {
+ printk("migration_cost=");
+ for (distance = 0; distance <= max_distance; distance++) {
+ if (distance)
+ printk(",");
+ printk("%ld", (long)migration_cost[distance] / 1000);
+ }
+ printk("\n");
}
- printk("\n");
j1 = jiffies;
if (migration_debug)
printk("migration: %ld seconds\n", (j1-j0)/HZ);
@@ -6109,7 +6111,7 @@ void __init sched_init(void)
runqueue_t *rq;
int i, j, k;
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
prio_array_t *array;
rq = cpu_rq(i);
diff --git a/kernel/signal.c b/kernel/signal.c
index d3efafd8109a..b373fc2420da 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -283,7 +283,7 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
return(q);
}
-static inline void __sigqueue_free(struct sigqueue *q)
+static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
diff --git a/kernel/sys.c b/kernel/sys.c
index d09cac23fdfd..0929c698affc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -440,23 +440,25 @@ void kernel_kexec(void)
}
EXPORT_SYMBOL_GPL(kernel_kexec);
+void kernel_shutdown_prepare(enum system_states state)
+{
+ notifier_call_chain(&reboot_notifier_list,
+ (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
+ system_state = state;
+ device_shutdown();
+}
/**
* kernel_halt - halt the system
*
* Shutdown everything and perform a clean system halt.
*/
-void kernel_halt_prepare(void)
-{
- notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
- system_state = SYSTEM_HALT;
- device_shutdown();
-}
void kernel_halt(void)
{
- kernel_halt_prepare();
+ kernel_shutdown_prepare(SYSTEM_HALT);
printk(KERN_EMERG "System halted.\n");
machine_halt();
}
+
EXPORT_SYMBOL_GPL(kernel_halt);
/**
@@ -464,20 +466,13 @@ EXPORT_SYMBOL_GPL(kernel_halt);
*
* Shutdown everything and perform a clean system power_off.
*/
-void kernel_power_off_prepare(void)
-{
- notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
- system_state = SYSTEM_POWER_OFF;
- device_shutdown();
-}
void kernel_power_off(void)
{
- kernel_power_off_prepare();
+ kernel_shutdown_prepare(SYSTEM_POWER_OFF);
printk(KERN_EMERG "Power down.\n");
machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
-
/*
* Reboot system call: for obvious reasons only root may call it,
* and even root needs to set up some magic numbers in the registers
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index cb99a42f8b37..71dd6f62efec 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -878,7 +878,17 @@ static ctl_table vm_table[] = {
.maxlen = sizeof(zone_reclaim_mode),
.mode = 0644,
.proc_handler = &proc_dointvec,
- .strategy = &zero,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
+ {
+ .ctl_name = VM_ZONE_RECLAIM_INTERVAL,
+ .procname = "zone_reclaim_interval",
+ .data = &zone_reclaim_interval,
+ .maxlen = sizeof(zone_reclaim_interval),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ .strategy = &sysctl_jiffies,
},
#endif
{ .ctl_name = 0 }
diff --git a/kernel/time.c b/kernel/time.c
index 7477b1d2079e..804539165d8b 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -155,7 +155,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
static int firsttime = 1;
int error = 0;
- if (!timespec_valid(tv))
+ if (tv && !timespec_valid(tv))
return -EINVAL;
error = security_settime(tv, tz);
@@ -637,15 +637,16 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
*
* Returns the timespec representation of the nsec parameter.
*/
-inline struct timespec ns_to_timespec(const nsec_t nsec)
+struct timespec ns_to_timespec(const nsec_t nsec)
{
struct timespec ts;
- if (nsec)
- ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC,
- &ts.tv_nsec);
- else
- ts.tv_sec = ts.tv_nsec = 0;
+ if (!nsec)
+ return (struct timespec) {0, 0};
+
+ ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
+ if (unlikely(nsec < 0))
+ set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
return ts;
}
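
The reworked ns_to_timespec() has to cope with negative inputs: the signed division leaves a negative remainder, which set_normalized_timespec() folds back into the 0..NSEC_PER_SEC range. A small userspace sketch of that normalization, using lldiv() as a stand-in for div_long_long_rem_signed() (the wrapper name below is hypothetical):

/* Illustrative sketch: splitting a signed nanosecond count into a
 * (sec, nsec) pair, then folding a negative remainder back into range. */
#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000LL

static void ns_to_ts(long long nsec, long long *sec, long long *nsn)
{
        lldiv_t d = lldiv(nsec, NSEC_PER_SEC);

        *sec = d.quot;
        *nsn = d.rem;
        if (*nsn < 0) {            /* normalize: remainder must be >= 0 */
                *nsn += NSEC_PER_SEC;
                *sec -= 1;
        }
}

int main(void)
{
        long long sec, nsn;

        ns_to_ts(-1500000000LL, &sec, &nsn);     /* -1.5 s */
        printf("%lld s + %lld ns\n", sec, nsn);  /* -2 s + 500000000 ns */
        return 0;
}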
diff --git a/kernel/user.c b/kernel/user.c
index 89e562feb1b1..d9deae43a9ab 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
+#include <linux/interrupt.h>
/*
* UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,16 @@
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
static DEFINE_SPINLOCK(uidhash_lock);
struct user_struct root_user = {
@@ -82,15 +93,19 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
struct user_struct *find_user(uid_t uid)
{
struct user_struct *ret;
+ unsigned long flags;
- spin_lock(&uidhash_lock);
+ spin_lock_irqsave(&uidhash_lock, flags);
ret = uid_hash_find(uid, uidhashentry(uid));
- spin_unlock(&uidhash_lock);
+ spin_unlock_irqrestore(&uidhash_lock, flags);
return ret;
}
void free_uid(struct user_struct *up)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
uid_hash_remove(up);
key_put(up->uid_keyring);
@@ -98,6 +113,7 @@ void free_uid(struct user_struct *up)
kmem_cache_free(uid_cachep, up);
spin_unlock(&uidhash_lock);
}
+ local_irq_restore(flags);
}
struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +121,9 @@ struct user_struct * alloc_uid(uid_t uid)
struct list_head *hashent = uidhashentry(uid);
struct user_struct *up;
- spin_lock(&uidhash_lock);
+ spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
- spin_unlock(&uidhash_lock);
+ spin_unlock_irq(&uidhash_lock);
if (!up) {
struct user_struct *new;
@@ -137,7 +153,7 @@ struct user_struct * alloc_uid(uid_t uid)
* Before adding this, check whether we raced
* on adding the same user already..
*/
- spin_lock(&uidhash_lock);
+ spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
key_put(new->uid_keyring);
@@ -147,7 +163,7 @@ struct user_struct * alloc_uid(uid_t uid)
uid_hash_insert(new, hashent);
up = new;
}
- spin_unlock(&uidhash_lock);
+ spin_unlock_irq(&uidhash_lock);
}
return up;
@@ -183,9 +199,9 @@ static int __init uid_cache_init(void)
INIT_LIST_HEAD(uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
- spin_lock(&uidhash_lock);
+ spin_lock_irq(&uidhash_lock);
uid_hash_insert(&root_user, uidhashentry(0));
- spin_unlock(&uidhash_lock);
+ spin_unlock_irq(&uidhash_lock);
return 0;
}
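
The uidhash_lock comment above explains why free_uid() wraps atomic_dec_and_lock() in local_irq_save()/local_irq_restore(). Interrupt disabling has no userspace analogue, but the dec-and-lock idiom itself can be sketched with pthreads; this is a simplified illustration (the real helper re-checks the count under the lock to close a race), and every name in it is hypothetical.

/* Illustrative sketch of the atomic_dec_and_lock() idiom free_uid()
 * relies on: drop a reference and take the hash lock only when the
 * count hits zero, so teardown happens exactly once, under the lock. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t uidhash_lock = PTHREAD_MUTEX_INITIALIZER;

struct fake_user {                 /* stand-in for user_struct */
        atomic_int count;
};

static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        if (atomic_fetch_sub(cnt, 1) != 1)
                return 0;          /* others still hold references */
        pthread_mutex_lock(lock);  /* last reference: lock for teardown */
        return 1;
}

static void put_user_ref(struct fake_user *up)
{
        if (dec_and_lock(&up->count, &uidhash_lock)) {
                printf("last ref dropped, freeing under the lock\n");
                pthread_mutex_unlock(&uidhash_lock);
        }
}

int main(void)
{
        struct fake_user u = { .count = 2 };

        put_user_ref(&u);          /* count 2 -> 1, nothing happens */
        put_user_ref(&u);          /* count 1 -> 0, teardown path runs */
        return 0;
}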