From 8239c25f47d2b318156993b15f33900a86ea5e17 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 20 Apr 2012 13:05:42 +0000 Subject: smp: Add task_struct argument to __cpu_up() Preparatory patch to make the idle thread allocation for secondary cpus generic. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Rusty Russell Cc: Paul E. McKenney Cc: Srivatsa S. Bhat Cc: Matt Turner Cc: Russell King Cc: Mike Frysinger Cc: Jesper Nilsson Cc: Richard Kuo Cc: Tony Luck Cc: Hirokazu Takata Cc: Ralf Baechle Cc: David Howells Cc: James E.J. Bottomley Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: David S. Miller Cc: Chris Metcalf Cc: Richard Weinberger Cc: x86@kernel.org Link: http://lkml.kernel.org/r/20120420124556.964170564@linutronix.de --- kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 2060c6e57027..e711aef0fb3c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -309,7 +309,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) } /* Arch-specific enabling code. */ - ret = __cpu_up(cpu); + ret = __cpu_up(cpu, NULL); if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); -- cgit v1.2.3-59-g8ed1b From 38498a67aa2cf8c80754b8d304bfacc10bc582b5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 20 Apr 2012 13:05:44 +0000 Subject: smp: Add generic smpboot facility Start a new file, which will hold SMP and CPU hotplug related generic infrastructure. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Rusty Russell Cc: Paul E. McKenney Cc: Srivatsa S. Bhat Cc: Matt Turner Cc: Russell King Cc: Mike Frysinger Cc: Jesper Nilsson Cc: Richard Kuo Cc: Tony Luck Cc: Hirokazu Takata Cc: Ralf Baechle Cc: David Howells Cc: James E.J. Bottomley Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: David S. 
Miller Cc: Chris Metcalf Cc: Richard Weinberger Cc: x86@kernel.org Link: http://lkml.kernel.org/r/20120420124557.035417523@linutronix.de --- kernel/Makefile | 1 + kernel/cpu.c | 8 ++++++++ kernel/smpboot.c | 14 ++++++++++++++ kernel/smpboot.h | 6 ++++++ 4 files changed, 29 insertions(+) create mode 100644 kernel/smpboot.c create mode 100644 kernel/smpboot.h (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index cb41b9547c9f..6c07f30fa9b7 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SMP) += smpboot.o ifneq ($(CONFIG_SMP),y) obj-y += up.o endif diff --git a/kernel/cpu.c b/kernel/cpu.c index e711aef0fb3c..e58b99ada3d8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -17,6 +17,8 @@ #include #include +#include "smpboot.h" + #ifdef CONFIG_SMP /* Serializes the updates to cpu_online_mask, cpu_present_mask */ static DEFINE_MUTEX(cpu_add_remove_lock); @@ -300,6 +302,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) return -EINVAL; cpu_hotplug_begin(); + + ret = smpboot_prepare(cpu); + if (ret) + goto out; + ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); if (ret) { nr_calls--; @@ -320,6 +327,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) out_notify: if (ret != 0) __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); +out: cpu_hotplug_done(); return ret; diff --git a/kernel/smpboot.c b/kernel/smpboot.c new file mode 100644 index 000000000000..6dae6a3d2d59 --- /dev/null +++ b/kernel/smpboot.c @@ -0,0 +1,14 @@ +/* + * Common SMP CPU bringup/teardown functions + */ +#include + +#include "smpboot.h" + +/** + * smpboot_prepare - generic smpboot preparation + */ +int __cpuinit smpboot_prepare(unsigned int cpu) +{ + return 0; +} diff --git a/kernel/smpboot.h b/kernel/smpboot.h new file mode 100644 index 000000000000..d88e77165086 --- /dev/null +++ b/kernel/smpboot.h @@ -0,0 +1,6 @@ +#ifndef SMPBOOT_H +#define SMPBOOT_H + +int smpboot_prepare(unsigned int cpu); + +#endif -- cgit v1.2.3-59-g8ed1b From 29d5e0476e1c4a513859e7858845ad172f560389 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 20 Apr 2012 13:05:45 +0000 Subject: smp: Provide generic idle thread allocation All SMP architectures have magic to fork the idle task and to store it for reusage when cpu hotplug is enabled. Provide a generic infrastructure for it. Create/reinit the idle thread for the cpu which is brought up in the generic code and hand the thread pointer to the architecture code via __cpu_up(). Note, that fork_idle() is called via a workqueue, because this guarantees that the idle thread does not get a reference to a user space VM. This can happen when the boot process did not bring up all possible cpus and a later cpu_up() is initiated via the sysfs interface. In that case fork_idle() would be called in the context of the user space task and take a reference on the user space VM. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Rusty Russell Cc: Paul E. McKenney Cc: Srivatsa S. Bhat Cc: Matt Turner Cc: Russell King Cc: Mike Frysinger Cc: Jesper Nilsson Cc: Richard Kuo Cc: Tony Luck Cc: Hirokazu Takata Cc: Ralf Baechle Cc: David Howells Cc: James E.J. Bottomley Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: David S. 
Miller Cc: Chris Metcalf Cc: Richard Weinberger Cc: x86@kernel.org Acked-by: Venkatesh Pallipadi Link: http://lkml.kernel.org/r/20120420124557.102478630@linutronix.de --- arch/Kconfig | 3 ++ kernel/cpu.c | 2 +- kernel/sched/core.c | 2 ++ kernel/smpboot.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++- kernel/smpboot.h | 10 +++++++ 5 files changed, 99 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index 684eb5af439d..4f0d0f7c8313 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -145,6 +145,9 @@ config HAVE_DMA_ATTRS config USE_GENERIC_SMP_HELPERS bool +config GENERIC_SMP_IDLE_THREAD + bool + config HAVE_REGS_AND_STACK_ACCESS_API bool help diff --git a/kernel/cpu.c b/kernel/cpu.c index e58b99ada3d8..05c46bae5e55 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -316,7 +316,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) } /* Arch-specific enabling code. */ - ret = __cpu_up(cpu, NULL); + ret = __cpu_up(cpu, idle_thread_get(cpu)); if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4603b9d8f30a..6a63cde23d03 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -83,6 +83,7 @@ #include "sched.h" #include "../workqueue_sched.h" +#include "../smpboot.h" #define CREATE_TRACE_POINTS #include @@ -7049,6 +7050,7 @@ void __init sched_init(void) /* May be allocated at isolcpus cmdline parse time */ if (cpu_isolated_map == NULL) zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); + idle_thread_set_boot_cpu(); #endif init_sched_fair_class(); diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 6dae6a3d2d59..ed1576981801 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -1,14 +1,96 @@ /* * Common SMP CPU bringup/teardown functions */ +#include +#include #include +#include +#include +#include #include "smpboot.h" +#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + unsigned int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c = container_of(work, struct create_idle, work); + + c->idle = fork_idle(c->cpu); + complete(&c->done); +} + +static struct task_struct * __cpuinit idle_thread_create(unsigned int cpu) +{ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + destroy_work_on_stack(&c_idle.work); + return c_idle.idle; +} + +/* + * For the hotplug case we keep the task structs around and reuse + * them. + */ +static DEFINE_PER_CPU(struct task_struct *, idle_threads); + +static inline struct task_struct *get_idle_for_cpu(unsigned int cpu) +{ + struct task_struct *tsk = per_cpu(idle_threads, cpu); + + if (!tsk) + return idle_thread_create(cpu); + init_idle(tsk, cpu); + return tsk; +} + +struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) +{ + return per_cpu(idle_threads, cpu); +} + +void __init idle_thread_set_boot_cpu(void) +{ + per_cpu(idle_threads, smp_processor_id()) = current; +} + +/** + * idle_thread_init - Initialize the idle thread for a cpu + * @cpu: The cpu for which the idle thread should be initialized + * + * Creates the thread if it does not exist. 
+ */ +static int __cpuinit idle_thread_init(unsigned int cpu) +{ + struct task_struct *idle = get_idle_for_cpu(cpu); + + if (IS_ERR(idle)) { + printk(KERN_ERR "failed fork for CPU %u\n", cpu); + return PTR_ERR(idle); + } + per_cpu(idle_threads, cpu) = idle; + return 0; +} +#else +static inline int idle_thread_init(unsigned int cpu) { return 0; } +#endif + /** * smpboot_prepare - generic smpboot preparation */ int __cpuinit smpboot_prepare(unsigned int cpu) { - return 0; + return idle_thread_init(cpu); } diff --git a/kernel/smpboot.h b/kernel/smpboot.h index d88e77165086..7943bbbab917 100644 --- a/kernel/smpboot.h +++ b/kernel/smpboot.h @@ -1,6 +1,16 @@ #ifndef SMPBOOT_H #define SMPBOOT_H +struct task_struct; + int smpboot_prepare(unsigned int cpu); +#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD +struct task_struct *idle_thread_get(unsigned int cpu); +void idle_thread_set_boot_cpu(void); +#else +static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; } +static inline void idle_thread_set_boot_cpu(void) { } +#endif + #endif -- cgit v1.2.3-59-g8ed1b From 3bb5d2ee396aabaa4e318f17e94d13e2ee0e5a88 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 20 Apr 2012 17:08:50 -0700 Subject: smp, idle: Allocate idle thread for each possible cpu during boot percpu areas are already allocated during boot for each possible cpu. percpu idle threads can be considered as an extension of the percpu areas, and allocate them for each possible cpu during boot. This will eliminate the need for workqueue based idle thread allocation. In future we can move the idle thread area into the percpu area too. [ tglx: Moved the loop into smpboot.c and added an error check when the init code failed to allocate an idle thread for a cpu which should be onlined ] Signed-off-by: Suresh Siddha Cc: Peter Zijlstra Cc: Rusty Russell Cc: Paul E. McKenney Cc: Srivatsa S. Bhat Cc: Tejun Heo Cc: David Rientjes Cc: venki@google.com Link: http://lkml.kernel.org/r/1334966930.28674.245.camel@sbsiddha-desk.sc.intel.com Signed-off-by: Thomas Gleixner --- kernel/cpu.c | 9 ++++--- kernel/smp.c | 4 ++++ kernel/smpboot.c | 72 +++++++++++++++----------------------------------------- kernel/smpboot.h | 2 ++ 4 files changed, 31 insertions(+), 56 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 05c46bae5e55..0e6353cf147a 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -297,15 +297,18 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) int ret, nr_calls = 0; void *hcpu = (void *)(long)cpu; unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; + struct task_struct *idle; if (cpu_online(cpu) || !cpu_present(cpu)) return -EINVAL; cpu_hotplug_begin(); - ret = smpboot_prepare(cpu); - if (ret) + idle = idle_thread_get(cpu); + if (IS_ERR(idle)) { + ret = PTR_ERR(idle); goto out; + } ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); if (ret) { @@ -316,7 +319,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) } /* Arch-specific enabling code. 
*/ - ret = __cpu_up(cpu, idle_thread_get(cpu)); + ret = __cpu_up(cpu, idle); if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); diff --git a/kernel/smp.c b/kernel/smp.c index 2f8b10ecf759..a61294c07f3f 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -13,6 +13,8 @@ #include #include +#include "smpboot.h" + #ifdef CONFIG_USE_GENERIC_SMP_HELPERS static struct { struct list_head queue; @@ -669,6 +671,8 @@ void __init smp_init(void) { unsigned int cpu; + idle_threads_init(); + /* FIXME: This should be done in userspace --RR */ for_each_present_cpu(cpu) { if (num_online_cpus() >= setup_max_cpus) diff --git a/kernel/smpboot.c b/kernel/smpboot.c index ed1576981801..e1a797e028a3 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -6,64 +6,42 @@ #include #include #include -#include #include "smpboot.h" #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - unsigned int cpu; -}; - -static void __cpuinit do_fork_idle(struct work_struct *work) -{ - struct create_idle *c = container_of(work, struct create_idle, work); - - c->idle = fork_idle(c->cpu); - complete(&c->done); -} - -static struct task_struct * __cpuinit idle_thread_create(unsigned int cpu) -{ - struct create_idle c_idle = { - .cpu = cpu, - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), - }; - - INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - destroy_work_on_stack(&c_idle.work); - return c_idle.idle; -} - /* * For the hotplug case we keep the task structs around and reuse * them. */ static DEFINE_PER_CPU(struct task_struct *, idle_threads); -static inline struct task_struct *get_idle_for_cpu(unsigned int cpu) +struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) { struct task_struct *tsk = per_cpu(idle_threads, cpu); if (!tsk) - return idle_thread_create(cpu); + return ERR_PTR(-ENOMEM); init_idle(tsk, cpu); return tsk; } -struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) +void __init idle_thread_set_boot_cpu(void) { - return per_cpu(idle_threads, cpu); + per_cpu(idle_threads, smp_processor_id()) = current; } -void __init idle_thread_set_boot_cpu(void) +static inline void idle_init(unsigned int cpu) { - per_cpu(idle_threads, smp_processor_id()) = current; + struct task_struct *tsk = per_cpu(idle_threads, cpu); + + if (!tsk) { + tsk = fork_idle(cpu); + if (IS_ERR(tsk)) + pr_err("SMP: fork_idle() failed for CPU %u\n", cpu); + else + per_cpu(idle_threads, cpu) = tsk; + } } /** @@ -72,25 +50,13 @@ void __init idle_thread_set_boot_cpu(void) * * Creates the thread if it does not exist. 
*/ -static int __cpuinit idle_thread_init(unsigned int cpu) +void __init idle_threads_init(void) { - struct task_struct *idle = get_idle_for_cpu(cpu); + unsigned int cpu; - if (IS_ERR(idle)) { - printk(KERN_ERR "failed fork for CPU %u\n", cpu); - return PTR_ERR(idle); + for_each_possible_cpu(cpu) { + if (cpu != smp_processor_id()) + idle_init(cpu); } - per_cpu(idle_threads, cpu) = idle; - return 0; } -#else -static inline int idle_thread_init(unsigned int cpu) { return 0; } #endif - -/** - * smpboot_prepare - generic smpboot preparation - */ -int __cpuinit smpboot_prepare(unsigned int cpu) -{ - return idle_thread_init(cpu); -} diff --git a/kernel/smpboot.h b/kernel/smpboot.h index 7943bbbab917..4cfbcb8a8362 100644 --- a/kernel/smpboot.h +++ b/kernel/smpboot.h @@ -8,9 +8,11 @@ int smpboot_prepare(unsigned int cpu); #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD struct task_struct *idle_thread_get(unsigned int cpu); void idle_thread_set_boot_cpu(void); +void idle_threads_init(void); #else static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; } static inline void idle_thread_set_boot_cpu(void) { } +static inline void idle_threads_init(unsigned int cpu) { } #endif #endif -- cgit v1.2.3-59-g8ed1b From 43a18b1e588d1b6a993eedd44dd3154590d9bebd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 4 May 2012 12:52:25 +0200 Subject: smp: Fix idle_thread_init() inline stub idle_thread_init() does not have arguments. Reported-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- kernel/smpboot.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/smpboot.h b/kernel/smpboot.h index 4cfbcb8a8362..80c0acfb8472 100644 --- a/kernel/smpboot.h +++ b/kernel/smpboot.h @@ -12,7 +12,7 @@ void idle_threads_init(void); #else static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; } static inline void idle_thread_set_boot_cpu(void) { } -static inline void idle_threads_init(unsigned int cpu) { } +static inline void idle_threads_init(void) { } #endif #endif -- cgit v1.2.3-59-g8ed1b From a4a2eb490e38aaff61eafcb8cde6725ad1be22ab Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 May 2012 09:02:48 +0000 Subject: init_task: Create generic init_task instance All archs define init_task in the same way (except ia64, but there is no particular reason why ia64 cannot use the common version). Create a generic instance so all archs can be converted over. The config switch is temporary and will be removed when all archs are converted over. Signed-off-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Chen Liqin Cc: Chris Metcalf Cc: Chris Zankel Cc: David Howells Cc: David S. Miller Cc: Geert Uytterhoeven Cc: Guan Xuetao Cc: Haavard Skinnemoen Cc: Hirokazu Takata Cc: James E.J. 
Bottomley Cc: Jesper Nilsson Cc: Jonas Bonn Cc: Mark Salter Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Matt Turner Cc: Michal Simek Cc: Mike Frysinger Cc: Paul Mundt Cc: Ralf Baechle Cc: Richard Kuo Cc: Richard Weinberger Cc: Russell King Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20120503085034.092585287@linutronix.de --- arch/Kconfig | 3 +++ init/Makefile | 1 + init/init_task.c | 24 ++++++++++++++++++++++++ kernel/sched/Makefile | 2 -- 4 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 init/init_task.c (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index 4f0d0f7c8313..2dd8fdd7ea9f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -148,6 +148,9 @@ config USE_GENERIC_SMP_HELPERS config GENERIC_SMP_IDLE_THREAD bool +config HAVE_GENERIC_INIT_TASK + bool + config HAVE_REGS_AND_STACK_ACCESS_API bool help diff --git a/init/Makefile b/init/Makefile index 0bf677aa0872..c55eac955cdc 100644 --- a/init/Makefile +++ b/init/Makefile @@ -9,6 +9,7 @@ else obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o endif obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o +obj-$(CONFIG_HAVE_GENERIC_INIT_TASK) += init_task.o mounts-y := do_mounts.o mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o diff --git a/init/init_task.c b/init/init_task.c new file mode 100644 index 000000000000..8b2f3996b035 --- /dev/null +++ b/init/init_task.c @@ -0,0 +1,24 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); + +/* Initial task structure */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); + +/* + * Initial thread structure. Alignment of this is handled by a special + * linker map entry. 
+ */
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9a7dd35102a3..173ea52f3af0 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -16,5 +16,3 @@ obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
-
-
-- cgit v1.2.3-59-g8ed1b

From f37f435f33717dcf15fd4bb422da739da7fc2052 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 7 May 2012 17:59:48 +0000
Subject: smp: Implement kick_all_cpus_sync()

Will replace the misnomed cpu_idle_wait() function which is copied a
gazillion times all over arch/*

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/20120507175652.049316594@linutronix.de
---
 include/linux/smp.h |  4 ++++
 kernel/smp.c        | 23 +++++++++++++++++++++++
 2 files changed, 27 insertions(+)

(limited to 'kernel')

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 24360de6c968..717fb746c9a8 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -81,6 +81,8 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
 int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

+void kick_all_cpus_sync(void);
+
 /*
  * Generic and arch helpers
  */
@@ -192,6 +194,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
	return smp_call_function_single(0, func, info, wait);
 }

+static inline void kick_all_cpus_sync(void) { }
+
 #endif /* !SMP */

 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index a61294c07f3f..d0ae5b24875e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -795,3 +795,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
	}
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
+
+static void do_nothing(void *unused)
+{
+}
+
+/**
+ * kick_all_cpus_sync - Force all cpus out of idle
+ *
+ * Used to synchronize the update of pm_idle function pointer. It's
+ * called after the pointer is updated and returns after the dummy
+ * callback function has been executed on all cpus. The execution of
+ * the function can only happen on the remote cpus after they have
+ * left the idle function which had been called via pm_idle function
+ * pointer. So it's guaranteed that nothing uses the previous pointer
+ * anymore.
+ */
+void kick_all_cpus_sync(void)
+{
+	/* Make sure the change is visible before we kick the cpus */
+	smp_mb();
+	smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
-- cgit v1.2.3-59-g8ed1b

From 6c0a9fa62feb7e9fdefa9720bcc03040c9b0b311 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 5 May 2012 15:05:40 +0000
Subject: fork: Remove the weak insanity

We error out when compiling with gcc 4.1.[01] as it miscompiles __weak.
The workaround with magic defines is no longer necessary. Make it
__weak again.
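
For illustration, the mechanism this falls back to is ordinary weak
linkage: the core code ships an empty __weak default and an architecture
overrides it simply by providing a strong definition of the same name.
The following is a minimal user-space sketch of that pattern, not kernel
code and not part of the patch; the hook name mirrors
arch_task_cache_init(), the bodies are invented for the example.

/*
 * Hedged sketch: weak default plus strong override.
 * Build with: gcc -o weakdemo weakdemo.c
 */
#include <stdio.h>

/* Core-code default: does nothing unless an architecture overrides it. */
void __attribute__((weak)) arch_task_cache_init(void)
{
	printf("weak default: no arch-specific task cache setup\n");
}

/*
 * An architecture that needs the hook would provide, in its own file:
 *
 *	void arch_task_cache_init(void)
 *	{
 *		... set up the arch task-struct cache ...
 *	}
 *
 * and the linker silently prefers that strong definition.
 */

int main(void)
{
	arch_task_cache_init();
	return 0;
}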
Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20120505150141.306358267@linutronix.de --- arch/sh/include/asm/thread_info.h | 1 - arch/x86/include/asm/thread_info.h | 1 - kernel/fork.c | 8 +------- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'kernel') diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 20ee40af16e9..09963d4018cb 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -98,7 +98,6 @@ static inline struct thread_info *current_thread_info(void) extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); extern void free_thread_info(struct thread_info *ti); extern void arch_task_cache_init(void); -#define arch_task_cache_init arch_task_cache_init extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); extern void init_thread_xstate(void); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index ad6df8ccd715..8692a166dd4e 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -284,6 +284,5 @@ static inline bool is_ia32_task(void) extern void arch_task_cache_init(void); extern void free_thread_info(struct thread_info *ti); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); -#define arch_task_cache_init arch_task_cache_init #endif #endif /* _ASM_X86_THREAD_INFO_H */ diff --git a/kernel/fork.c b/kernel/fork.c index b9372a0bff18..a79b36e2e912 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -203,13 +203,7 @@ void __put_task_struct(struct task_struct *tsk) } EXPORT_SYMBOL_GPL(__put_task_struct); -/* - * macro override instead of weak attribute alias, to workaround - * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions. - */ -#ifndef arch_task_cache_init -#define arch_task_cache_init() -#endif +void __init __weak arch_task_cache_init(void) { } void __init fork_init(unsigned long mempages) { -- cgit v1.2.3-59-g8ed1b From 2889f60814e15dea644782597d897cdba943564f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 5 May 2012 15:05:41 +0000 Subject: fork: Move thread info gfp flags to header These flags can be useful for extra allocations outside of the core code. Add __GFP_NOTRACK to them, so the archs which have kmemcheck do not have to provide extra allocators just for that reason. 
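
Because the mask now lives in <linux/thread_info.h>, an architecture that
keeps its own thread_info allocator can pick up THREADINFO_GFP instead of
open-coding GFP_KERNEL | __GFP_NOTRACK (plus __GFP_ZERO under
CONFIG_DEBUG_STACK_USAGE). A hedged sketch of such an arch-side allocator,
modelled on the page-based one in kernel/fork.c; the function below is
illustrative only and is not part of this patch.

/* Sketch only: an arch-private allocator reusing the shared gfp mask. */
#include <linux/thread_info.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	/* THREADINFO_GFP is provided by <linux/thread_info.h> after this patch */
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}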
Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20120505150141.428211694@linutronix.de --- include/linux/thread_info.h | 6 ++++++ kernel/fork.c | 8 ++------ 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 8d03f079688c..db78775eff3b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -54,6 +54,12 @@ extern long do_no_restart_syscall(struct restart_block *parm); #ifdef __KERNEL__ +#ifdef CONFIG_DEBUG_STACK_USAGE +# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#else +# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) +#endif + /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions diff --git a/kernel/fork.c b/kernel/fork.c index a79b36e2e912..5d22b9b8cf7b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -123,12 +123,8 @@ static struct kmem_cache *task_struct_cachep; static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { -#ifdef CONFIG_DEBUG_STACK_USAGE - gfp_t mask = GFP_KERNEL | __GFP_ZERO; -#else - gfp_t mask = GFP_KERNEL; -#endif - struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); + struct page *page = alloc_pages_node(node, THREADINFO_GFP, + THREAD_SIZE_ORDER); return page ? page_address(page) : NULL; } -- cgit v1.2.3-59-g8ed1b From 41101809a865dd0be1b56eff46c83fad321870b2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 5 May 2012 15:05:41 +0000 Subject: fork: Provide weak arch_release_[task_struct|thread_info] functions These functions allow us to move most of the duplicated thread_info allocators to the core code. Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20120505150141.366461660@linutronix.de --- kernel/fork.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 5d22b9b8cf7b..2dfad0269674 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -112,14 +112,26 @@ int nr_processes(void) } #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -# define alloc_task_struct_node(node) \ - kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node) -# define free_task_struct(tsk) \ - kmem_cache_free(task_struct_cachep, (tsk)) static struct kmem_cache *task_struct_cachep; + +static inline struct task_struct *alloc_task_struct_node(int node) +{ + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); +} + +void __weak arch_release_task_struct(struct task_struct *tsk) { } + +static inline void free_task_struct(struct task_struct *tsk) +{ + arch_release_task_struct(tsk); + kmem_cache_free(task_struct_cachep, tsk); +} #endif #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR + +void __weak arch_release_thread_info(struct thread_info *ti) { } + static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { @@ -131,6 +143,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, static inline void free_thread_info(struct thread_info *ti) { + arch_release_thread_info(ti); free_pages((unsigned long)ti, THREAD_SIZE_ORDER); } #endif -- cgit v1.2.3-59-g8ed1b From 0d15d74a1ead10673b5b1db66d4c90552769096c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 5 May 2012 15:05:41 +0000 Subject: fork: Provide kmemcache based thread_info allocator Several architectures have their own kmemcache based thread allocator because THREAD_SIZE is smaller than PAGE_SIZE. 
Add it to the core code conditionally on THREAD_SIZE < PAGE_SIZE so the private copies can go. Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20120505150141.491002124@linutronix.de --- kernel/fork.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 2dfad0269674..7590bd6e8dff 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -132,6 +132,11 @@ static inline void free_task_struct(struct task_struct *tsk) void __weak arch_release_thread_info(struct thread_info *ti) { } +/* + * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a + * kmemcache based allocator. + */ +# if THREAD_SIZE >= PAGE_SIZE static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { @@ -146,6 +151,28 @@ static inline void free_thread_info(struct thread_info *ti) arch_release_thread_info(ti); free_pages((unsigned long)ti, THREAD_SIZE_ORDER); } +# else +static struct kmem_cache *thread_info_cache; + +static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, + int node) +{ + return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); +} + +static void free_thread_info(struct thread_info *ti) +{ + arch_release_thread_info(ti); + kmem_cache_free(thread_info_cache, ti); +} + +void thread_info_cache_init(void) +{ + thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, + THREAD_SIZE, 0, NULL); + BUG_ON(thread_info_cache == NULL); +} +# endif #endif /* SLAB cache for signal_struct structures (tsk->signal) */ -- cgit v1.2.3-59-g8ed1b From f5e10287367dcffb5504d19c83e85ca041ca2596 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 5 May 2012 15:05:48 +0000 Subject: task_allocator: Use config switches instead of magic defines Replace __HAVE_ARCH_TASK_ALLOCATOR and __HAVE_ARCH_THREAD_ALLOCATOR with proper config switches. 
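
The effect is easiest to see from the kernel/fork.c side: the generic
allocators are now compiled out by a Kconfig symbol that an architecture
selects, instead of by a define that had to be visible at exactly the
right include point. A condensed sketch of the resulting shape, assembled
from the hunks that follow (abridged, not a verbatim quote):

/* kernel/fork.c after this patch (condensed) */
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}
#endif

#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
/* the generic page- or kmemcache-based thread_info allocator lives here */
#endif

/*
 * An architecture that wants its own allocators now just selects the
 * symbols in its Kconfig entry, e.g. (mirroring the ia64 hunk below):
 *
 *	select ARCH_TASK_STRUCT_ALLOCATOR
 *	select ARCH_THREAD_INFO_ALLOCATOR
 */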
Signed-off-by: Thomas Gleixner Cc: Sam Ravnborg Cc: Tony Luck Link: http://lkml.kernel.org/r/20120505150142.371309416@linutronix.de --- arch/Kconfig | 8 ++++++++ arch/ia64/Kconfig | 2 ++ arch/ia64/include/asm/thread_info.h | 3 --- arch/sparc/Kconfig | 1 + arch/sparc/include/asm/thread_info_32.h | 2 -- kernel/fork.c | 7 +++---- 6 files changed, 14 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index 597b132b3902..bd265a217bd2 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -152,6 +152,14 @@ config GENERIC_SMP_IDLE_THREAD config ARCH_INIT_TASK bool +# Select if arch has its private alloc_task_struct() function +config ARCH_TASK_STRUCT_ALLOCATOR + bool + +# Select if arch has its private alloc_thread_info() function +config ARCH_THREAD_INFO_ALLOCATOR + bool + config HAVE_REGS_AND_STACK_ACCESS_API bool help diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 022ea3a9d1ab..ba667b60f32d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -35,6 +35,8 @@ config IA64 select GENERIC_IOMAP select GENERIC_SMP_IDLE_THREAD select ARCH_INIT_TASK + select ARCH_TASK_STRUCT_ALLOCATOR + select ARCH_THREAD_INFO_ALLOCATOR default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index e054bcc4273c..310d9734f02d 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -54,8 +54,6 @@ struct thread_info { }, \ } -#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR - #ifndef ASM_OFFSETS_C /* how to get the thread information struct from C */ #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) @@ -84,7 +82,6 @@ struct thread_info { #endif #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) -#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR #define alloc_task_struct_node(node) \ ({ \ struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e417f35d5912..ec0347aaeaa8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -36,6 +36,7 @@ config SPARC32 def_bool !64BIT select GENERIC_ATOMIC64 select CLZ_TAB + select ARCH_THREAD_INFO_ALLOCATOR config SPARC64 def_bool 64BIT diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index c2a1080cdd3b..b29498dea6b7 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -80,8 +80,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); */ #define THREAD_INFO_ORDER 1 -#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR - BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int) #define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node) diff --git a/kernel/fork.c b/kernel/fork.c index 7590bd6e8dff..a1793e442b20 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -111,7 +111,7 @@ int nr_processes(void) return total; } -#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR static struct kmem_cache *task_struct_cachep; static inline struct task_struct *alloc_task_struct_node(int node) @@ -128,8 +128,7 @@ static inline void free_task_struct(struct task_struct *tsk) } #endif -#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR - +#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR void __weak arch_release_thread_info(struct thread_info *ti) { } /* @@ -243,7 +242,7 @@ void __init __weak arch_task_cache_init(void) { } void __init 
fork_init(unsigned long mempages) { -#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES #endif -- cgit v1.2.3-59-g8ed1b
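
Taken together, the series leaves CPU bring-up looking roughly as follows.
This is a consolidation of the pieces above, abridged and with notifier
and error paths elided, not a verbatim copy of any single hunk.

/*
 * Consolidated sketch of the final flow:
 *  - smp_init() pre-forks an idle task for every possible CPU at boot,
 *  - _cpu_up() looks the cached task up (idle_thread_get() also
 *    re-initializes it via init_idle()),
 *  - the architecture receives it through the new __cpu_up(cpu, tidle).
 */
void __init smp_init(void)			/* kernel/smp.c */
{
	unsigned int cpu;

	idle_threads_init();	/* fork_idle() for each possible CPU */

	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}
}

static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)	/* kernel/cpu.c */
{
	struct task_struct *idle = idle_thread_get(cpu);

	if (IS_ERR(idle))
		return PTR_ERR(idle);	/* boot-time fork_idle() failed */

	/* CPU_UP_PREPARE notifiers etc. elided */
	return __cpu_up(cpu, idle);	/* arch starts the new CPU on this task */
}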