From fb7183ef3c016d9067ff83f3ff2455be1818f902 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Fri, 31 Oct 2014 09:22:04 -0700
Subject: x86, boot: Document intermediates more clearly

This adds a comment detailing the various intermediate files used to
build the bootable decompression image for the x86 kernel.

Signed-off-by: Kees Cook
Reviewed-by: Josh Triplett
Cc: Matt Fleming
Cc: Ard Biesheuvel
Cc: Junjie Mao
Link: http://lkml.kernel.org/r/20141031162204.GA26268@www.outflux.net
Signed-off-by: Thomas Gleixner
---
 arch/x86/boot/compressed/Makefile | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch/x86/boot')

diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 704f58aa79cd..8a3918180a9e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -3,6 +3,18 @@
 #
 # create a compressed vmlinux image from the original vmlinux
 #
+# vmlinuz is:
+#	decompression code (*.o)
+#	asm globals (piggy.S), including:
+#		vmlinux.bin.(gz|bz2|lzma|...)
+#
+# vmlinux.bin is:
+#	vmlinux stripped of debugging and comments
+# vmlinux.bin.all is:
+#	vmlinux.bin + vmlinux.relocs
+# vmlinux.bin.(gz|bz2|lzma|...) is:
+#	(see scripts/Makefile.lib size_append)
+#	compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
 
 targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
--
cgit v1.2.3-59-g8ed1b
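[Editor's note: the "u32 size of vmlinux.bin.all" line in the comment above
refers to the size_append helper in scripts/Makefile.lib, which appends a
file's length as four little-endian bytes so the decompression stub can read
the uncompressed size from the end of the blob. The following minimal C
sketch only illustrates that byte layout; the kernel actually implements
size_append as a shell function, and the size value used here is invented.]

/*
 * Illustration of what size_append produces: a file's length
 * emitted as a 4-byte little-endian value, which the build
 * appends after the compressed vmlinux.bin.all.
 */
#include <stdio.h>
#include <stdint.h>

static void size_append(FILE *out, uint32_t size)
{
	int i;

	/* Least-significant byte first (little endian). */
	for (i = 0; i < 4; i++)
		fputc((size >> (8 * i)) & 0xff, out);
}

int main(void)
{
	/* Example: a hypothetical 13417985-byte vmlinux.bin.all. */
	size_append(stdout, 13417985u);
	return 0;
}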
From 6d24c5f72dfb26e5fa7f02fa9266dfdbae41adba Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Tue, 4 Nov 2014 08:50:18 +0000
Subject: x86-64: Handle PC-relative relocations on per-CPU data

This is in preparation for using RIP-relative addressing in many of the
per-CPU accesses.

Signed-off-by: Jan Beulich
Link: http://lkml.kernel.org/r/5458A15A0200007800044A9A@mail.emea.novell.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/boot/compressed/misc.c | 14 +++++++++++++-
 arch/x86/tools/relocs.c         | 36 +++++++++++++++++++++++++++---------
 2 files changed, 40 insertions(+), 10 deletions(-)

(limited to 'arch/x86/boot')

diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 57ab74df7eea..644abd767c12 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -260,7 +260,7 @@ static void handle_relocations(void *output, unsigned long output_len)
 
 	/*
 	 * Process relocations: 32 bit relocations first then 64 bit after.
-	 * Two sets of binary relocations are added to the end of the kernel
+	 * Three sets of binary relocations are added to the end of the kernel
 	 * before compression. Each relocation table entry is the kernel
 	 * address of the location which needs to be updated stored as a
 	 * 32-bit value which is sign extended to 64 bits.
@@ -270,6 +270,8 @@ static void handle_relocations(void *output, unsigned long output_len)
 	 * kernel bits...
 	 * 0 - zero terminator for 64 bit relocations
 	 * 64 bit relocation repeated
+	 * 0 - zero terminator for inverse 32 bit relocations
+	 * 32 bit inverse relocation repeated
 	 * 0 - zero terminator for 32 bit relocations
 	 * 32 bit relocation repeated
 	 *
@@ -286,6 +288,16 @@ static void handle_relocations(void *output, unsigned long output_len)
 		*(uint32_t *)ptr += delta;
 	}
 #ifdef CONFIG_X86_64
+	while (*--reloc) {
+		long extended = *reloc;
+		extended += map;
+
+		ptr = (unsigned long)extended;
+		if (ptr < min_addr || ptr > max_addr)
+			error("inverse 32-bit relocation outside of kernel!\n");
+
+		*(int32_t *)ptr -= delta;
+	}
 	for (reloc--; *reloc; reloc--) {
 		long extended = *reloc;
 		extended += map;
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index a5efb21d5228..0c2fae8d929d 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -20,7 +20,10 @@ struct relocs {
 
 static struct relocs relocs16;
 static struct relocs relocs32;
+#if ELF_BITS == 64
+static struct relocs relocs32neg;
 static struct relocs relocs64;
+#endif
 
 struct section {
 	Elf_Shdr	shdr;
@@ -762,11 +765,16 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 
 	switch (r_type) {
 	case R_X86_64_NONE:
+		/* NONE can be ignored. */
+		break;
+
 	case R_X86_64_PC32:
 		/*
-		 * NONE can be ignored and PC relative relocations don't
-		 * need to be adjusted.
+		 * PC relative relocations don't need to be adjusted unless
+		 * referencing a percpu symbol.
 		 */
+		if (is_percpu_sym(sym, symname))
+			add_reloc(&relocs32neg, offset);
 		break;
 
 	case R_X86_64_32:
@@ -986,7 +994,10 @@ static void emit_relocs(int as_text, int use_real_mode)
 	/* Order the relocations for more efficient processing */
 	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
+#if ELF_BITS == 64
+	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#endif
 
 	/* Print the relocations */
 	if (as_text) {
@@ -1007,14 +1018,21 @@ static void emit_relocs(int as_text, int use_real_mode)
 		for (i = 0; i < relocs32.count; i++)
 			write_reloc(relocs32.offset[i], stdout);
 	} else {
-		if (ELF_BITS == 64) {
-			/* Print a stop */
-			write_reloc(0, stdout);
+#if ELF_BITS == 64
+		/* Print a stop */
+		write_reloc(0, stdout);
 
-			/* Now print each relocation */
-			for (i = 0; i < relocs64.count; i++)
-				write_reloc(relocs64.offset[i], stdout);
-		}
+		/* Now print each relocation */
+		for (i = 0; i < relocs64.count; i++)
+			write_reloc(relocs64.offset[i], stdout);
+
+		/* Print a stop */
+		write_reloc(0, stdout);
+
+		/* Now print each inverse 32-bit relocation */
+		for (i = 0; i < relocs32neg.count; i++)
+			write_reloc(relocs32neg.offset[i], stdout);
+#endif
 
 		/* Print a stop */
 		write_reloc(0, stdout);
--
cgit v1.2.3-59-g8ed1b
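[Editor's note: as a reading aid for the misc.c hunk above: the three
relocation tables sit at the end of the decompressed image and are walked
backwards. Plain 32-bit entries are adjusted by +delta, the new inverse
32-bit entries by -delta (a PC-relative reference to a per-CPU symbol gets
shorter by exactly the load offset, since the code moved but the per-CPU
area did not), and 64-bit entries by +delta in a 64-bit slot. The following
self-contained sketch mimics only the table walk; it is not kernel code,
and the table contents and delta below are invented.]

/*
 * Sketch of the table walk in handle_relocations(). Layout, from
 * low to high address: 0 terminator, 64-bit relocations, 0
 * terminator, inverse 32-bit relocations, 0 terminator, 32-bit
 * relocations. The walk starts past the end and runs backwards.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t table[] = { 0, 0x1000, 0, 0x2000, 0, 0x3000, 0x3004 };
	int32_t *reloc = table + sizeof(table) / sizeof(table[0]);
	unsigned long delta = 0x200000;	/* pretend load offset */

	while (*--reloc)	/* 32-bit relocations: add delta */
		printf("u32 at 0x%x += 0x%lx\n", (unsigned)*reloc, delta);
	while (*--reloc)	/* inverse 32-bit relocations: subtract delta */
		printf("s32 at 0x%x -= 0x%lx\n", (unsigned)*reloc, delta);
	while (*--reloc)	/* 64-bit relocations: add delta to a 64-bit slot */
		printf("u64 at 0x%x += 0x%lx\n", (unsigned)*reloc, delta);
	return 0;
}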