author     Joerg Roedel <jroedel@suse.de>            2020-09-07 15:15:34 +0200
committer  Borislav Petkov <bp@suse.de>              2020-09-07 22:18:38 +0200
commit     f5963ba7a45fc6ff298a34976064354be437e1d8 (patch)
tree       3a390e287e24f87c73a2caf0e26970b84e7b1c82 /arch
parent     x86/head/64: Switch to initial stack earlier (diff)
x86/head/64: Install a CPU bringup IDT
Add a separate bringup IDT for the CPU bringup code that will be used until the kernel switches to the idt_table. There are two reasons for a separate IDT:

1) When the idt_table is set up and the secondary CPUs are booted, it contains entries (e.g. IST entries) which require certain CPU state to be set up first. This includes a working TSS (for IST), MSR_GS_BASE (for the stack protector) or CR4.FSGSBASE (for the paranoid_entry path). By using a dedicated IDT for early boot, this state need not be set up that early.

2) The idt_table is static to idt.c, so any function using or modifying it must be in idt.c too. That means all compiler-driven instrumentation like tracing or KASAN is also active in this code. But during early CPU bringup the environment is not set up for this instrumentation to work correctly.

To avoid all of these hassles and make early exception handling robust, use a dedicated bringup IDT.

The IDT is loaded twice: first on the boot CPU while the kernel is still running on direct-mapped addresses, and again after the switch to kernel addresses has happened. The second IDT load happens on both the boot and the secondary CPUs.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-34-joro@8bytes.org
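[Editor's aside] The first of the two loads runs before the switch to the kernel's virtual address space, so the link-time addresses of the IDT and its descriptor cannot be dereferenced directly; the patch below routes them through head64.c's existing fixup_pointer() helper. A rough sketch of the idea behind that translation (simplified, with example-local names, not the kernel's exact helper; physbase is assumed to be the physical address the kernel image is currently loaded at):

	/* Link-time start of the kernel image, provided by the linker script. */
	extern char _text[];

	/*
	 * Sketch: translate the link-time address of a kernel object into the
	 * address it occupies while the CPU is still running on the early
	 * identity (direct) mapping.
	 */
	static void *example_fixup_pointer(void *ptr, unsigned long physbase)
	{
		unsigned long offset = (unsigned long)ptr - (unsigned long)_text;

		return (void *)(physbase + offset);
	}

The second load, done by early_setup_idt(), runs after the switch to kernel addresses and can therefore use the symbols as-is.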
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/setup.h   1
-rw-r--r--  arch/x86/kernel/head64.c      39
-rw-r--r--  arch/x86/kernel/head_64.S      5
3 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 5c2fd05bd52c..4b3ca5ade2fd 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,6 +50,7 @@ extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp
extern unsigned long __startup_secondary_64(void);
extern void startup_64_setup_env(unsigned long physbase);
extern int early_make_pgtable(unsigned long address);
+extern void early_setup_idt(void);

#ifdef CONFIG_X86_INTEL_MID
extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8c82be44be94..7bfd5c27c773 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -36,6 +36,8 @@
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
+#include <asm/realmode.h>
+#include <asm/desc.h>

/*
* Manage page tables very early on.
@@ -509,6 +511,41 @@ void __init x86_64_start_reservations(char *real_mode_data)
}

/*
+ * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
+ * used until the idt_table takes over. On the boot CPU this happens in
+ * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
+ * this happens in the functions called from head_64.S.
+ *
+ * The idt_table can't be used that early because all the code modifying it is
+ * in idt.c and can be instrumented by tracing or KASAN, which both don't work
+ * during early CPU bringup. Also the idt_table has the runtime vectors
+ * configured which require certain CPU state to be setup already (like TSS),
+ * which also hasn't happened yet in early CPU bringup.
+ */
+static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
+
+static struct desc_ptr bringup_idt_descr = {
+ .size = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
+ .address = 0, /* Set at runtime */
+};
+
+/* This runs while still in the direct mapping */
+static void startup_64_load_idt(unsigned long physbase)
+{
+ struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
+
+ desc->address = (unsigned long)fixup_pointer(bringup_idt_table, physbase);
+ native_load_idt(desc);
+}
+
+/* This is used when running on kernel addresses */
+void early_setup_idt(void)
+{
+ bringup_idt_descr.address = (unsigned long)bringup_idt_table;
+ native_load_idt(&bringup_idt_descr);
+}
+
+/*
* Setup boot CPU state needed before kernel switches to virtual addresses.
*/
void __head startup_64_setup_env(unsigned long physbase)
@@ -521,4 +558,6 @@ void __head startup_64_setup_env(unsigned long physbase)
asm volatile("movl %%eax, %%ds\n"
"movl %%eax, %%ss\n"
"movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
+
+ startup_64_load_idt(physbase);
}
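[Editor's aside] A note on bringup_idt_descr above: LIDT consumes a pseudo-descriptor made of a 16-bit limit (table size in bytes minus one, hence the "- 1") and a 64-bit base. With 16-byte gate descriptors and NUM_EXCEPTION_VECTORS (32) entries, the bringup IDT covers only exception vectors 0-31. A minimal, self-contained sketch of that load path with example-local names (the kernel's struct desc_ptr and native_load_idt() have the same shape):

	#include <stdint.h>

	/* Pseudo-descriptor consumed by LIDT in 64-bit mode. */
	struct example_idt_ptr {
		uint16_t limit;		/* table size in bytes, minus one */
		uint64_t base;		/* linear address of the first gate */
	} __attribute__((packed));

	#define EXAMPLE_GATE_SIZE		16	/* x86-64 gate descriptor */
	#define EXAMPLE_NUM_EXCEPTION_VECTORS	32	/* vectors 0-31 */

	static void example_load_idt(const void *table)
	{
		struct example_idt_ptr dtr = {
			.limit = EXAMPLE_NUM_EXCEPTION_VECTORS * EXAMPLE_GATE_SIZE - 1,
			.base  = (uintptr_t)table,
		};

		/* What native_load_idt() boils down to; LIDT is a privileged insn. */
		asm volatile("lidt %0" : : "m" (dtr));
	}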
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 83050c9e54d9..1de09b58e578 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -198,6 +198,11 @@ SYM_CODE_START(secondary_startup_64)
*/
movq initial_stack(%rip), %rsp

+ /* Setup and Load IDT */
+ pushq %rsi
+ call early_setup_idt
+ popq %rsi
+
/* Check if nx is implemented */
movl $0x80000001, %eax
cpuid
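[Editor's aside] Two remarks on the head_64.S hunk. First, on the boot-CPU path %rsi carries the pointer to the real-mode boot data that is later handed to the C entry code, and the calling convention does not preserve it across calls, hence the pushq/popq pair around the call into C. Second, the bringup IDT loaded here is still empty, so an exception taken at this point is still fatal; later changes can populate individual vectors before the table is loaded. As a purely hypothetical illustration of how one gate could be filled in using the existing pack_gate()/native_write_idt_entry() helpers from <asm/desc.h> (the helper name and its use are this example's, not part of the patch):

	#include <asm/desc.h>
	#include <asm/segment.h>

	/*
	 * Hypothetical helper, not part of this patch: install one interrupt
	 * gate into the (so far empty) bringup IDT before it is loaded.
	 */
	static void example_set_bringup_idt_handler(gate_desc *idt, int vector,
						    const void *handler)
	{
		gate_desc desc;

		/* DPL-0 interrupt gate in the kernel code segment, no IST stack. */
		pack_gate(&desc, GATE_INTERRUPT, (unsigned long)handler,
			  0, 0, __KERNEL_CS);

		native_write_idt_entry(idt, vector, &desc);
	}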