Diffstat (limited to 'init'):
 -rw-r--r--  init/Kconfig | 38
 -rw-r--r--  init/main.c  | 69
 2 files changed, 39 insertions(+), 68 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 5cff9a980c39..2de5b1cbadd9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -320,13 +320,17 @@ config AUDITSYSCALL
help
Enable low-overhead system-call auditing infrastructure that
can be used independently or with another kernel subsystem,
- such as SELinux. To use audit's filesystem watch feature, please
- ensure that INOTIFY is configured.
+ such as SELinux.
+
+config AUDIT_WATCH
+ def_bool y
+ depends on AUDITSYSCALL
+ select FSNOTIFY
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL
- select INOTIFY
+ select FSNOTIFY
menu "RCU Subsystem"
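The new AUDIT_WATCH entry above is the usual Kconfig shape for an always-on sub-feature: a hidden def_bool y symbol gated on its parent option that pulls in the notification backend via select FSNOTIFY rather than the removed INOTIFY dependency. As a hedged illustration only (not part of this patch), a user-visible option needing fsnotify would follow the same pattern; EXAMPLE_FS_MONITOR is a hypothetical symbol:

    config EXAMPLE_FS_MONITOR
            bool "Example filesystem monitor (hypothetical)"
            depends on AUDITSYSCALL
            select FSNOTIFY
            help
              Illustrative only: selecting this forces FSNOTIFY on,
              mirroring how AUDIT_WATCH and AUDIT_TREE now obtain
              their notification backend.

Because AUDIT_WATCH itself has no prompt, enabling AUDITSYSCALL is enough to build it, and select guarantees FSNOTIFY comes along.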
@@ -573,8 +577,8 @@ config CGROUP_MEM_RES_CTLR
could in turn add some fork/exit overhead.
config CGROUP_MEM_RES_CTLR_SWAP
- bool "Memory Resource Controller Swap Extension(EXPERIMENTAL)"
- depends on CGROUP_MEM_RES_CTLR && SWAP && EXPERIMENTAL
+ bool "Memory Resource Controller Swap Extension"
+ depends on CGROUP_MEM_RES_CTLR && SWAP
help
Add swap management feature to memory resource controller. When you
enable this, you can limit mem+swap usage per cgroup. In other words,
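For context on the option above (a hedged userspace illustration, not part of the patch): with CGROUP_MEM_RES_CTLR_SWAP built in, each memory cgroup gains a memory.memsw.limit_in_bytes file that caps memory plus swap together. The cgroup mount point and group name below are assumed examples.

    #include <stdio.h>

    int main(void)
    {
            /* Assumed cgroup v1 memory hierarchy mounted at this path. */
            const char *limit =
                    "/sys/fs/cgroup/memory/example/memory.memsw.limit_in_bytes";
            FILE *f = fopen(limit, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* Cap the group's combined memory + swap usage at 512 MiB. */
            fprintf(f, "%llu\n", 512ULL * 1024 * 1024);
            fclose(f);
            return 0;
    }

A plain memory.limit_in_bytes cap still lets the group spill into swap; the memsw limit closes that gap, which is what the swap extension is for.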
@@ -1143,30 +1147,6 @@ config TRACEPOINTS
source "arch/Kconfig"
-config SLOW_WORK
- default n
- bool
- help
- The slow work thread pool provides a number of dynamically allocated
- threads that can be used by the kernel to perform operations that
- take a relatively long time.
-
- An example of this would be CacheFiles doing a path lookup followed
- by a series of mkdirs and a create call, all of which have to touch
- disk.
-
- See Documentation/slow-work.txt.
-
-config SLOW_WORK_DEBUG
- bool "Slow work debugging through debugfs"
- default n
- depends on SLOW_WORK && DEBUG_FS
- help
- Display the contents of the slow work run queue through debugfs,
- including items currently executing.
-
- See Documentation/slow-work.txt.
-
endmenu # General setup
config HAVE_GENERIC_DMA_COHERENT
diff --git a/init/main.c b/init/main.c
index a42fdf4aeba9..22d61cb06f98 100644
--- a/init/main.c
+++ b/init/main.c
@@ -32,7 +32,6 @@
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
-#include <linux/workqueue.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
@@ -66,11 +65,9 @@
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
-#include <linux/kmemtrace.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
-#include <trace/boot.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -204,11 +201,11 @@ static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;
-extern struct obs_kernel_param __setup_start[], __setup_end[];
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
static int __init obsolete_checksetup(char *line)
{
- struct obs_kernel_param *p;
+ const struct obs_kernel_param *p;
int had_early_param = 0;
p = __setup_start;
@@ -444,7 +441,6 @@ static noinline void __init_refok rest_init(void)
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
rcu_read_unlock();
complete(&kthreadd_done);
- unlock_kernel();
/*
* The boot idle thread must execute schedule()
@@ -462,7 +458,7 @@ static noinline void __init_refok rest_init(void)
/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
- struct obs_kernel_param *p;
+ const struct obs_kernel_param *p;
for (p = __setup_start; p < __setup_end; p++) {
if ((p->early && strcmp(param, p->str) == 0) ||
@@ -532,6 +528,7 @@ static void __init mm_init(void)
page_cgroup_init_flatmem();
mem_init();
kmem_cache_init();
+ percpu_init_late();
pgtable_cache_init();
vmalloc_init();
}
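The one functional addition in mm_init() above is percpu_init_late(), placed right after kmem_cache_init() so the per-CPU allocator can complete its own setup once the slab allocator works. For orientation only, a minimal sketch of the dynamic per-CPU interface that allocator serves; the counter below is hypothetical:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>

    /* Hypothetical dynamically allocated per-CPU counter. */
    static unsigned long __percpu *example_counters;

    static int __init example_counters_init(void)
    {
            example_counters = alloc_percpu(unsigned long);
            if (!example_counters)
                    return -ENOMEM;

            /* Preemption-safe bump of this CPU's copy. */
            this_cpu_inc(*example_counters);
            return 0;
    }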
@@ -539,7 +536,7 @@ static void __init mm_init(void)
asmlinkage void __init start_kernel(void)
{
char * command_line;
- extern struct kernel_param __start___param[], __stop___param[];
+ extern const struct kernel_param __start___param[], __stop___param[];
smp_setup_processor_id();
@@ -565,7 +562,6 @@ asmlinkage void __init start_kernel(void)
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
- lock_kernel();
tick_init();
boot_cpu_init();
page_address_init();
@@ -663,7 +659,6 @@ asmlinkage void __init start_kernel(void)
#endif
page_cgroup_init();
enable_debug_pagealloc();
- kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
idr_init_cache();
@@ -725,38 +720,39 @@ int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
static char msgbuf[64];
-static struct boot_trace_call call;
-static struct boot_trace_ret ret;
-int do_one_initcall(initcall_t fn)
+static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
- int count = preempt_count();
ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int ret;
- if (initcall_debug) {
- call.caller = task_pid_nr(current);
- printk("calling %pF @ %i\n", fn, call.caller);
- calltime = ktime_get();
- trace_boot_call(&call, fn);
- enable_boot_trace();
- }
+ printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current));
+ calltime = ktime_get();
+ ret = fn();
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", fn,
+ ret, duration);
- ret.result = fn();
+ return ret;
+}
- if (initcall_debug) {
- disable_boot_trace();
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- trace_boot_ret(&ret, fn);
- printk("initcall %pF returned %d after %Ld usecs\n", fn,
- ret.result, ret.duration);
- }
+int __init_or_module do_one_initcall(initcall_t fn)
+{
+ int count = preempt_count();
+ int ret;
+
+ if (initcall_debug)
+ ret = do_one_initcall_debug(fn);
+ else
+ ret = fn();
msgbuf[0] = 0;
- if (ret.result && ret.result != -ENODEV && initcall_debug)
- sprintf(msgbuf, "error code %d ", ret.result);
+ if (ret && ret != -ENODEV && initcall_debug)
+ sprintf(msgbuf, "error code %d ", ret);
if (preempt_count() != count) {
strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -770,7 +766,7 @@ int do_one_initcall(initcall_t fn)
printk("initcall %pF returned with %s\n", fn, msgbuf);
}
- return ret.result;
+ return ret;
}
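The rework above isolates the initcall_debug path in do_one_initcall_debug(), so the common case reduces to ret = fn() with the old boot-trace hooks gone. As a hedged sketch of the caller side (my_driver_init is hypothetical, not from this patch), a built-in driver's init routine is registered as an initcall and eventually runs through do_one_initcall():

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/module.h>

    /* Hypothetical built-in driver initialization. */
    static int __init my_driver_init(void)
    {
            pr_info("my_driver: ready\n");

            /* A nonzero return other than -ENODEV is flagged as
             * "error code %d" when initcall_debug is set.
             */
            return 0;
    }
    module_init(my_driver_init);

Booting with initcall_debug on the kernel command line sends every such call through do_one_initcall_debug(), producing the paired "calling" and "initcall ... returned ... usecs" messages added above at KERN_DEBUG level.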
@@ -796,7 +792,6 @@ static void __init do_initcalls(void)
*/
static void __init do_basic_setup(void)
{
- init_workqueues();
cpuset_init_smp();
usermodehelper_init();
init_tmpfs();
@@ -829,7 +824,6 @@ static noinline int init_post(void)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
- unlock_kernel();
mark_rodata_ro();
system_state = SYSTEM_RUNNING;
numa_default_policy();
@@ -869,8 +863,6 @@ static int __init kernel_init(void * unused)
* Wait until kthreadd is all set-up.
*/
wait_for_completion(&kthreadd_done);
- lock_kernel();
-
/*
* init can allocate pages on any node
*/
@@ -894,7 +886,6 @@ static int __init kernel_init(void * unused)
smp_prepare_cpus(setup_max_cpus);
do_pre_smp_initcalls();
- start_boot_trace();
smp_init();
sched_init_smp();