Diffstat (limited to 'arch/s390/boot/startup.c')
-rw-r--r-- | arch/s390/boot/startup.c | 320
1 file changed, 250 insertions, 70 deletions
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 863e6bcaa5a1..6cf89314209a 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <asm/page-states.h>
 #include <asm/boot_data.h>
 #include <asm/sections.h>
+#include <asm/maccess.h>
 #include <asm/cpu_mf.h>
 #include <asm/setup.h>
 #include <asm/kasan.h>
@@ -10,26 +12,31 @@
 #include <asm/sclp.h>
 #include <asm/diag.h>
 #include <asm/uv.h>
+#include <asm/abs_lowcore.h>
+#include <asm/physmem_info.h>
 #include "decompressor.h"
 #include "boot.h"
 #include "uv.h"
 
 unsigned long __bootdata_preserved(__kaslr_offset);
-unsigned long __bootdata(__amode31_base);
+unsigned long __bootdata_preserved(__abs_lowcore);
+unsigned long __bootdata_preserved(__memcpy_real_area);
+pte_t *__bootdata_preserved(memcpy_real_ptep);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
 struct page *__bootdata_preserved(vmemmap);
 unsigned long __bootdata_preserved(vmemmap_size);
 unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
+unsigned long __bootdata_preserved(max_mappable);
 unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
 struct oldmem_data __bootdata_preserved(oldmem_data);
 
+struct machine_info machine;
+
 void error(char *x)
 {
 	sclp_early_printk("\n\n");
@@ -39,6 +46,60 @@ void error(char *x)
 	disabled_wait();
 }
 
+static void detect_facilities(void)
+{
+	if (test_facility(8)) {
+		machine.has_edat1 = 1;
+		local_ctl_set_bit(0, CR0_EDAT_BIT);
+	}
+	if (test_facility(78))
+		machine.has_edat2 = 1;
+	if (test_facility(130))
+		machine.has_nx = 1;
+}
+
+static int cmma_test_essa(void)
+{
+	unsigned long reg1, reg2, tmp = 0;
+	int rc = 1;
+	psw_t old;
+
+	/* Test ESSA_GET_STATE */
+	asm volatile(
+		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+		"	epsw	%[reg1],%[reg2]\n"
+		"	st	%[reg1],0(%[psw_pgm])\n"
+		"	st	%[reg2],4(%[psw_pgm])\n"
+		"	larl	%[reg1],1f\n"
+		"	stg	%[reg1],8(%[psw_pgm])\n"
+		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+		"	la	%[rc],0\n"
+		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		: [reg1] "=&d" (reg1),
+		  [reg2] "=&a" (reg2),
+		  [rc] "+&d" (rc),
+		  [tmp] "=&d" (tmp),
+		  "+Q" (S390_lowcore.program_new_psw),
+		  "=Q" (old)
+		: [psw_old] "a" (&old),
+		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+		  [cmd] "i" (ESSA_GET_STATE)
+		: "cc", "memory");
+	return rc;
+}
+
+static void cmma_init(void)
+{
+	if (!cmma_flag)
+		return;
+	if (cmma_test_essa()) {
+		cmma_flag = 0;
+		return;
+	}
+	if (test_facility(147))
+		cmma_flag = 2;
+}
+
 static void setup_lpp(void)
 {
 	S390_lowcore.current_pid = 0;
@@ -54,16 +115,20 @@ unsigned long mem_safe_offset(void)
 }
 #endif
 
-static void rescue_initrd(unsigned long addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
 {
+	unsigned long old_addr, addr, size;
+
 	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
 		return;
-	if (!initrd_data.start || !initrd_data.size)
+	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
 		return;
-	if (addr <= initrd_data.start)
+	if (addr >= min && addr + size <= max)
 		return;
-	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
-	initrd_data.start = addr;
+	old_addr = addr;
+	physmem_free(RR_INITRD);
+	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+	memmove((void *)addr, (void *)old_addr, size);
 }
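
The rewritten rescue_initrd() no longer moves the initrd to a precomputed rescue address: it leaves the initrd alone when its reserved range already lies inside [min, max] and otherwise re-allocates top-down through the boot-time physmem allocator (physmem_alloc_top_down() in the patch) and copies it over. A rough user-space sketch of the same check-then-move logic; the arena and mock_alloc_top_down() are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <string.h>

/* Mock "physical" arena; the real code is guided by physmem_info. */
static unsigned char arena[1 << 20];
static size_t arena_top = sizeof(arena);

/* Illustrative stand-in for physmem_alloc_top_down(): carve from the top. */
static size_t mock_alloc_top_down(size_t size, size_t align)
{
	size_t addr = (arena_top - size) & ~(align - 1);

	arena_top = addr;
	return addr;
}

/* Move a blob into [min, max) unless it already fits there. */
static size_t rescue(size_t addr, size_t size, size_t min, size_t max)
{
	size_t new_addr;

	if (addr >= min && addr + size <= max)
		return addr;	/* already safe, nothing to do */
	new_addr = mock_alloc_top_down(size, 8);
	/* memmove, as in the patch, so an overlapping move is still safe */
	memmove(arena + new_addr, arena + addr, size);
	return new_addr;
}

int main(void)
{
	size_t addr = 16, size = 256;	/* pretend initrd at a low address */

	addr = rescue(addr, size, 4096, arena_top);
	printf("initrd now at offset %zu\n", addr);
	return 0;
}

The real allocator tracks reservations per range type (RR_INITRD above); the sketch collapses that bookkeeping to a single bump pointer.
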
 
 static void copy_bootdata(void)
@@ -76,7 +141,8 @@ static void copy_bootdata(void)
 	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start,
 	       vmlinux.bootdata_preserved_size);
 }
 
-static void handle_relocs(unsigned long offset)
+#ifdef CONFIG_PIE_BUILD
+static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
 {
 	Elf64_Rela *rela_start, *rela_end, *rela;
 	int r_type, r_sym, rc;
@@ -107,6 +173,54 @@ static void handle_relocs(unsigned long offset)
 	}
 }
 
+static void kaslr_adjust_got(unsigned long offset) {}
+static void rescue_relocs(void) {}
+static void free_relocs(void) {}
+#else
+static int *vmlinux_relocs_64_start;
+static int *vmlinux_relocs_64_end;
+
+static void rescue_relocs(void)
+{
+	unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start;
+
+	vmlinux_relocs_64_start = (void *)physmem_alloc_top_down(RR_RELOC, size, 0);
+	vmlinux_relocs_64_end = (void *)vmlinux_relocs_64_start + size;
+	memmove(vmlinux_relocs_64_start, __vmlinux_relocs_64_start, size);
+}
+
+static void free_relocs(void)
+{
+	physmem_free(RR_RELOC);
+}
+
+static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
+{
+	int *reloc;
+	long loc;
+
+	/* Adjust R_390_64 relocations */
+	for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
+		loc = (long)*reloc + offset;
+		if (loc < min_addr || loc > max_addr)
+			error("64-bit relocation outside of kernel!\n");
+		*(u64 *)loc += offset;
+	}
+}
+
+static void kaslr_adjust_got(unsigned long offset)
+{
+	u64 *entry;
+
+	/*
+	 * Even without -fPIE, Clang still uses a global offset table for some
+	 * reason. Adjust the GOT entries.
+	 */
+	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
+		*entry += offset;
+}
+#endif
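
In the non-PIE variant, each relocation table entry locates a 64-bit slot in the image that holds an absolute address; applying KASLR means using the offset both to find the slot's new location and to fix the value stored in it. A compressed user-space model of that fixup pass; a plain byte buffer stands in for the kernel image and the entries are simplified to buffer offsets:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IMAGE_SIZE 64

int main(void)
{
	unsigned char image[IMAGE_SIZE] = { 0 };
	/* Offsets of 64-bit slots holding absolute addresses. */
	uint32_t relocs[] = { 8, 24 };
	uint64_t delta = 0x200000;	/* pretend KASLR offset */
	uint64_t val;
	size_t i;

	/* Plant two absolute "addresses" pointing into the unrelocated image. */
	val = 16;  memcpy(image + 8, &val, sizeof(val));
	val = 40;  memcpy(image + 24, &val, sizeof(val));

	/* kaslr_adjust_relocs()-style pass: patch each recorded 64-bit slot. */
	for (i = 0; i < sizeof(relocs) / sizeof(relocs[0]); i++) {
		/* Mirror of the patch's min_addr/max_addr sanity check. */
		if (relocs[i] + sizeof(val) > IMAGE_SIZE) {
			fprintf(stderr, "relocation outside of image\n");
			return 1;
		}
		memcpy(&val, image + relocs[i], sizeof(val));
		val += delta;
		memcpy(image + relocs[i], &val, sizeof(val));
	}

	memcpy(&val, image + 8, sizeof(val));
	printf("slot at 8 now holds %#llx\n", (unsigned long long)val);
	return 0;
}

In the real code the table entries are link-time absolute addresses, so the offset is first added to locate the relocated slot; the sketch abstracts that to offsets within one buffer.
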
+
 /*
  * Merge information from several sources into a single ident_map_size value.
  * "ident_map_size" represents the upper limit of physical memory we may ever
@@ -117,7 +231,7 @@ static void handle_relocs(unsigned long offset)
  *
  * Consider the following factors:
  *   1. max_physmem_end - end of physical memory online or standby.
- *      Always <= end of the last online memory block (get_mem_detect_end()).
+ *      Always >= end of the last online memory range (get_physmem_online_end()).
  *   2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
  *      kernel is able to support.
  *   3. "mem=" kernel command line option which limits physical memory usage.
@@ -137,53 +251,61 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 
 #ifdef CONFIG_CRASH_DUMP
 	if (oldmem_data.start) {
-		kaslr_enabled = 0;
+		__kaslr_enabled = 0;
 		ident_map_size = min(ident_map_size, oldmem_data.size);
 	} else if (ipl_block_valid && is_ipl_block_dump()) {
-		kaslr_enabled = 0;
+		__kaslr_enabled = 0;
 		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
 			ident_map_size = min(ident_map_size, hsa_size);
 	}
 #endif
 }
 
-static void setup_kernel_memory_layout(void)
+static unsigned long setup_kernel_memory_layout(void)
 {
 	unsigned long vmemmap_start;
+	unsigned long asce_limit;
 	unsigned long rte_size;
 	unsigned long pages;
+	unsigned long vsize;
+	unsigned long vmax;
 
 	pages = ident_map_size / PAGE_SIZE;
 	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
 	/* choose kernel address space layout: 4 or 3 levels. */
-	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
-	if (IS_ENABLED(CONFIG_KASAN) ||
-	    vmalloc_size > _REGION2_SIZE ||
-	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
-		    _REGION2_SIZE) {
-		MODULES_END = _REGION1_SIZE;
+	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
+		MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
+	vsize = size_add(vsize, vmalloc_size);
+	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
+		asce_limit = _REGION1_SIZE;
 		rte_size = _REGION2_SIZE;
 	} else {
-		MODULES_END = _REGION2_SIZE;
+		asce_limit = _REGION2_SIZE;
 		rte_size = _REGION3_SIZE;
 	}
+
 	/*
-	 * forcing modules and vmalloc area under the ultravisor
+	 * Forcing modules and vmalloc area under the ultravisor
 	 * secure storage limit, so that any vmalloc allocation
 	 * we do could be used to back secure guest storage.
	 */
-	adjust_to_uv_max(&MODULES_END);
+	vmax = adjust_to_uv_max(asce_limit);
 #ifdef CONFIG_KASAN
 	/* force vmalloc and modules below kasan shadow */
-	MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
+	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
+				   sizeof(struct lowcore));
+	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
-	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
+	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+	vmalloc_size = min(vmalloc_size, vsize);
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
 
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -191,22 +313,25 @@ static void setup_kernel_memory_layout(void)
 	pages = SECTION_ALIGN_UP(pages);
 	/* keep vmemmap_start aligned to a top level region table entry */
 	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
-	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
 	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
+	/* maximum mappable address as seen by arch_get_mappable_range() */
+	max_mappable = vmemmap_start;
 	/* make sure identity map doesn't overlay with vmemmap */
 	ident_map_size = min(ident_map_size, vmemmap_start);
 	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
 	/* make sure vmemmap doesn't overlay with vmalloc area */
 	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
 	vmemmap = (struct page *)vmemmap_start;
+
+	return asce_limit;
 }
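
The 3-vs-4 page-table-level decision above is pure arithmetic: sum every consumer of virtual space and compare against what a 3-level table can span. A standalone back-of-the-envelope version; the region/segment sizes and the 64-byte struct page below are assumptions of this sketch, not taken from the patch:

#include <stdio.h>

#define _REGION3_SIZE	(1UL << 31)	/* assumed: 2 GB */
#define _REGION2_SIZE	(1UL << 42)	/* assumed: 4 TB, 3-level span */
#define _REGION1_SIZE	(1UL << 53)	/* assumed: 8 PB, 4-level span */

/* round x up to a power-of-two boundary a */
static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long ident_map_size = 16UL << 30;	/* 16 GB of RAM */
	unsigned long vmemmap_size = (ident_map_size / 4096) * 64;
	unsigned long modules_len = 512UL << 20;
	unsigned long extra = 2UL << 20;	/* memcpy_real + abs_lowcore windows */
	unsigned long vmalloc_size = 128UL << 30;
	unsigned long vsize, asce_limit;

	vsize = round_up_to(ident_map_size, _REGION3_SIZE) + vmemmap_size +
		modules_len + extra + vmalloc_size;
	asce_limit = (vsize > _REGION2_SIZE) ? _REGION1_SIZE : _REGION2_SIZE;
	printf("vsize=%lu GiB -> %s\n", vsize >> 30,
	       asce_limit == _REGION1_SIZE ? "4 levels" : "3 levels");
	return 0;
}

With 16 GB of memory and a 128 GB vmalloc area the total stays far below the 3-level span, so three levels suffice; CONFIG_KASAN or a multi-TB vmalloc request tips the patch's check to four.
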
 /*
  * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
  */
-static void clear_bss_section(void)
+static void clear_bss_section(unsigned long vmlinux_lma)
 {
-	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
 }
 
 /*
@@ -223,78 +348,133 @@ static void setup_vmalloc_size(void)
 	vmalloc_size = max(size, vmalloc_size);
 }
 
-static void offset_vmlinux_info(unsigned long offset)
+static void kaslr_adjust_vmlinux_info(unsigned long offset)
 {
-	vmlinux.default_lma += offset;
 	*(unsigned long *)(&vmlinux.entry) += offset;
 	vmlinux.bootdata_off += offset;
 	vmlinux.bootdata_preserved_off += offset;
+#ifdef CONFIG_PIE_BUILD
 	vmlinux.rela_dyn_start += offset;
 	vmlinux.rela_dyn_end += offset;
 	vmlinux.dynsym_start += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
-	__amode31_base = PAGE_ALIGN(safe_addr);
-	return safe_addr + vmlinux.amode31_size;
+#else
+	vmlinux.got_start += offset;
+	vmlinux.got_end += offset;
+#endif
+	vmlinux.init_mm_off += offset;
+	vmlinux.swapper_pg_dir_off += offset;
+	vmlinux.invalid_pg_dir_off += offset;
+#ifdef CONFIG_KASAN
+	vmlinux.kasan_early_shadow_page_off += offset;
+	vmlinux.kasan_early_shadow_pte_off += offset;
+	vmlinux.kasan_early_shadow_pmd_off += offset;
+	vmlinux.kasan_early_shadow_pud_off += offset;
+	vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
 }
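
kaslr_adjust_vmlinux_info() shifts every absolute address recorded in the vmlinux info block by the same KASLR delta; note that vmlinux.default_lma itself is deliberately no longer shifted, so it keeps its link-time meaning and serves as the fallback base. A toy model of the pattern; struct and field names are invented for illustration:

#include <stdio.h>

/* Toy stand-in for struct vmlinux_info: absolute addresses recorded at link time. */
struct img_info {
	unsigned long entry;
	unsigned long bootdata_off;
	unsigned long init_mm_off;
};

/* kaslr_adjust_vmlinux_info()-style pass: every recorded address moves as one. */
static void adjust_info(struct img_info *info, unsigned long offset)
{
	info->entry += offset;
	info->bootdata_off += offset;
	info->init_mm_off += offset;
}

int main(void)
{
	struct img_info info = { 0x100000, 0x180000, 0x1a0000 };

	adjust_info(&info, 0x4000000);	/* pretend KASLR offset of 64 MB */
	printf("entry=%#lx bootdata=%#lx\n", info.entry, info.bootdata_off);
	return 0;
}

Forgetting a single field here would leave one stale pre-KASLR pointer behind, which is why the patch enumerates every offset, including the KASAN shadow ones, in one place.
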
 
 void startup_kernel(void)
 {
-	unsigned long random_lma;
+	unsigned long max_physmem_end;
+	unsigned long vmlinux_lma = 0;
+	unsigned long amode31_lma = 0;
+	unsigned long asce_limit;
 	unsigned long safe_addr;
 	void *img;
+	psw_t psw;
 
-	initrd_data.start = parmarea.initrd_start;
-	initrd_data.size = parmarea.initrd_size;
+	setup_lpp();
+	safe_addr = mem_safe_offset();
+
+	/*
+	 * Reserve decompressor memory together with decompression heap, buffer and
+	 * memory which might be occupied by uncompressed kernel at default 1Mb
+	 * position (if KASLR is off or failed).
+	 */
+	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
 	oldmem_data.start = parmarea.oldmem_base;
 	oldmem_data.size = parmarea.oldmem_size;
 
-	setup_lpp();
 	store_ipl_parmblock();
-	safe_addr = mem_safe_offset();
-	safe_addr = reserve_amode31(safe_addr);
-	safe_addr = read_ipl_report(safe_addr);
+	read_ipl_report();
 	uv_query_info();
-	rescue_initrd(safe_addr);
 	sclp_early_read_info();
 	setup_boot_command_line();
 	parse_boot_command_line();
+	detect_facilities();
+	cmma_init();
 	sanitize_prot_virt_host();
-	setup_ident_map_size(detect_memory());
+	max_physmem_end = detect_max_physmem_end();
+	setup_ident_map_size(max_physmem_end);
 	setup_vmalloc_size();
-	setup_kernel_memory_layout();
-
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
-		random_lma = get_random_base(safe_addr);
-		if (random_lma) {
-			__kaslr_offset = random_lma - vmlinux.default_lma;
-			img = (void *)vmlinux.default_lma;
-			offset_vmlinux_info(__kaslr_offset);
+	asce_limit = setup_kernel_memory_layout();
+	/* got final ident_map_size, physmem allocations could be performed now */
+	physmem_set_usable_limit(ident_map_size);
+	detect_physmem_online_ranges(max_physmem_end);
+	save_ipl_cert_comp_list();
+	rescue_initrd(safe_addr, ident_map_size);
+	rescue_relocs();
+
+	if (kaslr_enabled()) {
+		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
+						     THREAD_SIZE, vmlinux.default_lma,
+						     ident_map_size);
+		if (vmlinux_lma) {
+			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
+			kaslr_adjust_vmlinux_info(__kaslr_offset);
 		}
 	}
+	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
+	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
 
 	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
 		img = decompress_kernel();
-		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
-	} else if (__kaslr_offset)
-		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+	} else if (__kaslr_offset) {
+		img = (void *)vmlinux.default_lma;
+		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+		memset(img, 0, vmlinux.image_size);
+	}
+
+	/* vmlinux decompression is done, shrink reserved low memory */
+	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+	if (kaslr_enabled())
+		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
 
-	clear_bss_section();
+	/*
+	 * The order of the following operations is important:
+	 *
+	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
+	 *   static memory references to data in .bss to be used by setup_vmem()
+	 *   (i.e init_mm.pgd)
+	 *
+	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able using
+	 *   static memory references to data in .bss (i.e init_mm.pgd)
+	 *
+	 * - copy_bootdata() must follow setup_vmem() to propagate changes
+	 *   to bootdata made by setup_vmem()
	 */
+	clear_bss_section(vmlinux_lma);
+	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
+	kaslr_adjust_got(__kaslr_offset);
+	free_relocs();
+	setup_vmem(asce_limit);
 	copy_bootdata();
-	if (IS_ENABLED(CONFIG_RELOCATABLE))
-		handle_relocs(__kaslr_offset);
-
-	if (__kaslr_offset) {
-		/*
-		 * Save KASLR offset for early dumps, before vmcore_info is set.
-		 * Mark as uneven to distinguish from real vmcore_info pointer.
-		 */
-		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
-		/* Clear non-relocated kernel */
-		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
-			memset(img, 0, vmlinux.image_size);
-	}
-	vmlinux.entry();
+
+	/*
+	 * Save KASLR offset for early dumps, before vmcore_info is set.
+	 * Mark as uneven to distinguish from real vmcore_info pointer.
+	 */
+	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
+
+	/*
+	 * Jump to the decompressed kernel entry point and switch DAT mode on.
+	 */
+	psw.addr = vmlinux.entry;
+	psw.mask = PSW_KERNEL_BITS;
+	__load_psw(psw);
 }
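
Judging by its call sites above, randomize_within_range(size, align, min, max) is expected to return an align-ed base such that [base, base + size) fits within [min, max), or 0 on failure, with the caller then falling back via the ?: idiom to the default link address. A rough user-space model of that contract; rand() stands in for the boot-time entropy source, and the function name is a mock, not the kernel's implementation:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick an aligned base so [base, base + size) fits in [min, max); 0 on failure. */
static unsigned long mock_randomize_within_range(unsigned long size,
						 unsigned long align,
						 unsigned long min,
						 unsigned long max)
{
	unsigned long lo, hi, slots;

	if (max < size)
		return 0;
	lo = (min + align - 1) / align;		/* first usable slot */
	hi = (max - size) / align;		/* last usable slot */
	if (lo > hi)
		return 0;
	slots = hi - lo + 1;
	return (lo + (unsigned long)rand() % slots) * align;
}

int main(void)
{
	unsigned long lma;

	srand((unsigned int)time(NULL));
	/* e.g. place a 42 MB image, 16 KB aligned, between 1 MB and 4 GB */
	lma = mock_randomize_within_range(42UL << 20, 16UL << 10,
					  1UL << 20, 4UL << 30);
	lma = lma ?: (1UL << 20);	/* same ?: fallback as the patch */
	printf("image at %#lx\n", lma);
	return 0;
}

In the patch the kernel image asks for THREAD_SIZE alignment within [vmlinux.default_lma, ident_map_size), while the amode31 block asks for PAGE_SIZE alignment within [0, SZ_2G).
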