Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig  22
-rw-r--r--  init/main.c   52
2 files changed, 47 insertions(+), 27 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index af4de4f1b02c..2d70c8c4b1d8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1420,6 +1420,28 @@ config KALLSYMS_ALL
Say N unless you really need all symbols.
+config KALLSYMS_ABSOLUTE_PERCPU
+ bool
+ default X86_64 && SMP
+
+config KALLSYMS_BASE_RELATIVE
+ bool
+ depends on KALLSYMS
+ default !IA64 && !(TILE && 64BIT)
+ help
+ Instead of emitting them as absolute values in the native word size,
+ emit the symbol references in the kallsyms table as 32-bit entries,
+ each containing a relative value in the range [base, base + U32_MAX]
+ or, when KALLSYMS_ABSOLUTE_PERCPU is in effect, each containing either
+ an absolute value in the range [0, S32_MAX] or a relative value in the
+ range [base, base + S32_MAX], where base is the lowest relative symbol
+ address encountered in the image.
+
+ On 64-bit builds, this reduces the size of the address table by 50%,
+ but more importantly, it results in entries whose values are build
+ time constants, and no relocation pass is required at runtime to fix
+ up the entries based on the runtime load address of the kernel.
+
config PRINTK
default y
bool "Enable support for printk" if EXPERT
diff --git a/init/main.c b/init/main.c
index 58c9e374704b..b3c6e363ae18 100644
--- a/init/main.c
+++ b/init/main.c
@@ -93,9 +93,6 @@ static int kernel_init(void *);
extern void init_IRQ(void);
extern void fork_init(void);
extern void radix_tree_init(void);
-#ifndef CONFIG_DEBUG_RODATA
-static inline void mark_rodata_ro(void) { }
-#endif
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
@@ -388,7 +385,6 @@ static noinline void __init_refok rest_init(void)
int pid;
rcu_scheduler_starting();
- smpboot_thread_init();
/*
* We need to spawn init first so that it obtains pid 1, however
* the init task will end up wanting to create kthreads, which, if
@@ -452,20 +448,6 @@ void __init parse_early_param(void)
done = 1;
}
-/*
- * Activate the first processor.
- */
-
-static void __init boot_cpu_init(void)
-{
- int cpu = smp_processor_id();
- /* Mark the boot cpu "present", "online" etc for SMP and UP case */
- set_cpu_online(cpu, true);
- set_cpu_active(cpu, true);
- set_cpu_present(cpu, true);
- set_cpu_possible(cpu, true);
-}
-
void __init __weak smp_setup_processor_id(void)
{
}
@@ -499,11 +481,6 @@ asmlinkage __visible void __init start_kernel(void)
char *command_line;
char *after_dashes;
- /*
- * Need to run as early as possible, to initialize the
- * lockdep hash:
- */
- lockdep_init();
set_task_stack_end_magic(&init_task);
smp_setup_processor_id();
debug_objects_early_init();
@@ -530,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
+ boot_cpu_state_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
@@ -727,7 +705,6 @@ static int __init initcall_blacklist(char *str)
static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
- struct list_head *tmp;
struct blacklist_entry *entry;
char *fn_name;
@@ -735,8 +712,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
if (!fn_name)
return false;
- list_for_each(tmp, &blacklisted_initcalls) {
- entry = list_entry(tmp, struct blacklist_entry, next);
+ list_for_each_entry(entry, &blacklisted_initcalls, next) {
if (!strcmp(fn_name, entry->buf)) {
pr_debug("initcall %s blacklisted\n", fn_name);
kfree(fn_name);
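
For reference, the two iteration styles in the hunk above are equivalent: list_for_each_entry() simply folds the list_entry() (container_of) step into the iterator. The following self-contained restatement reuses the structure, field and list names visible in the hunk; the two function names are made up for this sketch and are not patch code.

#include <linux/list.h>
#include <linux/string.h>

/* Mirrors the declarations used by initcall_blacklisted() above. */
struct blacklist_entry {
	struct list_head next;
	char *buf;
};

static LIST_HEAD(blacklisted_initcalls);

/* Open-coded form, as removed by the patch. */
static bool blacklisted_open_coded(const char *fn_name)
{
	struct list_head *tmp;
	struct blacklist_entry *entry;

	list_for_each(tmp, &blacklisted_initcalls) {
		entry = list_entry(tmp, struct blacklist_entry, next);
		if (!strcmp(fn_name, entry->buf))
			return true;
	}
	return false;
}

/* Same walk with the type-aware iterator, as added by the patch. */
static bool blacklisted_entry_iter(const char *fn_name)
{
	struct blacklist_entry *entry;

	list_for_each_entry(entry, &blacklisted_initcalls, next)
		if (!strcmp(fn_name, entry->buf))
			return true;
	return false;
}
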
@@ -929,6 +905,28 @@ static int try_to_run_init_process(const char *init_filename)
static noinline void __init kernel_init_freeable(void);
+#ifdef CONFIG_DEBUG_RODATA
+static bool rodata_enabled = true;
+static int __init set_debug_rodata(char *str)
+{
+ return strtobool(str, &rodata_enabled);
+}
+__setup("rodata=", set_debug_rodata);
+
+static void mark_readonly(void)
+{
+ if (rodata_enabled)
+ mark_rodata_ro();
+ else
+ pr_info("Kernel memory protection disabled.\n");
+}
+#else
+static inline void mark_readonly(void)
+{
+ pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
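
The new "rodata=" parameter is parsed with strtobool(), which at the time examined only the first character of the value. A minimal userspace sketch of that behaviour, purely to show which values flip rodata_enabled; strtobool_sketch and the sample values are inventions of this sketch, not patch code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's strtobool() of that era. */
static int strtobool_sketch(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	default:
		return -1;	/* the kernel returns -EINVAL here */
	}
}

int main(void)
{
	static const char * const values[] = { "1", "y", "0", "n" };
	bool rodata_enabled;

	for (unsigned int i = 0; i < 4; i++) {
		rodata_enabled = true;	/* default, as in the hunk above */
		strtobool_sketch(values[i], &rodata_enabled);
		printf("rodata=%s -> mark_rodata_ro() %s\n", values[i],
		       rodata_enabled ? "called" : "skipped");
	}
	return 0;
}

Unrecognised values leave rodata_enabled at its default of true, so read-only protection stays on unless it is explicitly switched off.
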
static int __ref kernel_init(void *unused)
{
int ret;
@@ -937,7 +935,7 @@ static int __ref kernel_init(void *unused)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
- mark_rodata_ro();
+ mark_readonly();
system_state = SYSTEM_RUNNING;
numa_default_policy();