Diffstat (limited to 'arch/x86/realmode/init.c')
-rw-r--r--   arch/x86/realmode/init.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 41d7669a97ad..88be32026768 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,6 +9,7 @@
 #include <asm/realmode.h>
 #include <asm/tlbflush.h>
 #include <asm/crash.h>
+#include <asm/msr.h>
 #include <asm/sev.h>
 
 struct real_mode_header *real_mode_header;
@@ -61,10 +62,12 @@ void __init reserve_real_mode(void)
 		set_real_mode_mem(mem);
 
 	/*
-	 * Unconditionally reserve the entire fisrt 1M, see comment in
+	 * Unconditionally reserve the entire first 1M, see comment in
	 * setup_arch().
	 */
 	memblock_reserve(0, SZ_1M);
+
+	memblock_clear_kho_scratch(0, SZ_1M);
 }
 
 static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
@@ -145,7 +148,7 @@ static void __init setup_real_mode(void)
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
-	rdmsrl(MSR_EFER, efer);
+	rdmsrq(MSR_EFER, efer);
 	trampoline_header->efer = efer & ~EFER_LMA;
 
 	trampoline_header->start = (u64) secondary_startup_64;
@@ -154,6 +157,9 @@ static void __init setup_real_mode(void)
 
 	trampoline_header->flags = 0;
 
+	trampoline_lock = &trampoline_header->lock;
+	*trampoline_lock = 0;
+
 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 
 	/* Map the real mode stub as virtual == physical */
@@ -200,14 +206,18 @@ static void __init set_real_mode_permissions(void)
 	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
 }
 
-static int __init init_real_mode(void)
+void __init init_real_mode(void)
 {
 	if (!real_mode_header)
 		panic("Real mode trampoline was not allocated");
 
 	setup_real_mode();
 	set_real_mode_permissions();
+}
 
+static int __init do_init_real_mode(void)
+{
+	x86_platform.realmode_init();
 	return 0;
 }
-early_initcall(init_real_mode);
+early_initcall(do_init_real_mode);
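
The last hunk stops registering init_real_mode() as an early_initcall directly and instead has do_init_real_mode() dispatch through a new x86_platform.realmode_init() hook, so a platform that never uses the real-mode trampoline can replace the step. The declaration and default wiring of that hook are not part of this diff; the lines below are a minimal sketch of how it is presumably hooked up elsewhere in the tree (only the field name and the init_real_mode() default are implied by the code above, everything else is assumed):

/* Sketch, not part of this diff: assumed wiring of the realmode_init hook. */

/* arch/x86/include/asm/x86_init.h (assumed location) */
struct x86_platform_ops {
	/* ... existing members ... */
	void (*realmode_init)(void);	/* real-mode trampoline setup */
};

/* arch/x86/kernel/x86_init.c (assumed default) */
struct x86_platform_ops x86_platform __ro_after_init = {
	/* ... existing defaults ... */
	.realmode_init		= init_real_mode,
};

/* A guest type with no real-mode trampoline (e.g. Xen PV) could then install a no-op: */
x86_platform.realmode_init = x86_init_noop;

With that indirection in place, do_init_real_mode() stays an early_initcall and simply defers to whatever callback the platform installed.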