From d2479a30499d93377d3ab10b7822bc5f10ed7f22 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 20 Apr 2018 15:20:19 -0700
Subject: x86/pti: Fix boot problems from Global-bit setting

commit 16dce603adc9de4237b7bf2ff5c5290f34373e7b

Part of the global bit _setting_ patches also includes clearing the
Global bit when it should not be enabled.  That is done with
set_memory_nonglobal(), which uses change_page_attr_clear() in
pageattr.c under the covers.

The TLB flushing code inside pageattr.c has checks like
BUG_ON(irqs_disabled()), looking for interrupt disabling that might
cause deadlocks.  But, these also trip in early boot on certain
preempt configurations.  Just copy the existing BUG_ON() sequence from
cpa_flush_range() to the other two sites and check for early boot.

Fixes: 39114b7a7 (x86/pti: Never implicitly clear _PAGE_GLOBAL for kernel image)
Reported-by: Mariusz Ceier
Reported-by: Aaro Koskinen
Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Tested-by: Aaro Koskinen
Acked-by: Ingo Molnar
Cc: Andrea Arcangeli
Cc: Juergen Gross
Cc: Nadav Amit
Cc: Kees Cook
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: David Woodhouse
Cc: Hugh Dickins
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Andy Lutomirski
Cc: Greg Kroah-Hartman
Cc: Borislav Petkov
Cc: Dan Williams
Cc: Arjan van de Ven
Link: https://lkml.kernel.org/r/20180420222019.20C4A410@viggo.jf.intel.com
---
 arch/x86/mm/pageattr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0f3d50f4c48c..4fadfd2b7017 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -172,7 +172,7 @@ static void __cpa_flush_all(void *arg)
 
 static void cpa_flush_all(unsigned long cache)
 {
-	BUG_ON(irqs_disabled());
+	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
@@ -236,7 +236,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 #endif
 
-	BUG_ON(irqs_disabled());
+	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
 	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
--
cgit v1.2.3-59-g8ed1b

From 58e65b51e6f9b9dd94a25ff2b2772222e0358099 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 20 Apr 2018 15:20:21 -0700
Subject: x86/pti: Fix boot warning from Global-bit setting

commit 231df823c4f04176f607afc4576c989895cff40e

The pageattr.c code attempts to process "faults" when it goes looking
for PTEs to change and finds non-present entries.  It allows these
faults in the linear map, which is "expected to have holes", but
WARN()s about them elsewhere, like when called on the kernel image.

However, change_page_attr_clear() is now called on the kernel image in
the process of trying to clear the Global bit.  This trips the warning
in __cpa_process_fault() if a non-present PTE is encountered in the
kernel image.  The "holes" in the kernel image result from
free_init_pages()'s use of set_memory_np().  These holes are totally
fine, and result from normal operation, just as they would be in the
kernel linear map.

Just silence the warning when holes in the kernel image are
encountered.
Fixes: 39114b7a7 (x86/pti: Never implicitly clear _PAGE_GLOBAL for kernel image)
Reported-by: Mariusz Ceier
Reported-by: Aaro Koskinen
Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Tested-by: Aaro Koskinen
Acked-by: Ingo Molnar
Cc: Andrea Arcangeli
Cc: Juergen Gross
Cc: Nadav Amit
Cc: Kees Cook
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: David Woodhouse
Cc: Hugh Dickins
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Andy Lutomirski
Cc: Greg Kroah-Hartman
Cc: Borislav Petkov
Cc: Dan Williams
Cc: Arjan van de Ven
Link: https://lkml.kernel.org/r/20180420222021.1C7D2B3F@viggo.jf.intel.com
---
 arch/x86/mm/pageattr.c | 40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4fadfd2b7017..3bded76e8d5c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
 static inline void split_page_count(int level) { }
 #endif
 
+static inline int
+within(unsigned long addr, unsigned long start, unsigned long end)
+{
+	return addr >= start && addr < end;
+}
+
+static inline int
+within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+{
+	return addr >= start && addr <= end;
+}
+
 #ifdef CONFIG_X86_64
 
 static inline unsigned long highmap_start_pfn(void)
@@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
 	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
 }
 
-#endif
-
-static inline int
-within(unsigned long addr, unsigned long start, unsigned long end)
+static bool __cpa_pfn_in_highmap(unsigned long pfn)
 {
-	return addr >= start && addr < end;
+	/*
+	 * Kernel text has an alias mapping at a high address, known
+	 * here as "highmap".
+	 */
+	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
 }
 
-static inline int
-within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+#else
+
+static bool __cpa_pfn_in_highmap(unsigned long pfn)
 {
-	return addr >= start && addr <= end;
+	/* There is no highmap on 32-bit */
+	return false;
 }
 
+#endif
+
 /*
  * Flushing functions
  */
@@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
 		cpa->numpages = 1;
 		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
 		return 0;
+
+	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
+		/* Faults in the highmap are OK, so do not warn: */
+		return -EFAULT;
 	} else {
 		WARN(1, KERN_WARNING "CPA: called for zero pte. "
 			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	 * to touch the high mapped kernel as well:
 	 */
 	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
-	    within_inclusive(cpa->pfn, highmap_start_pfn(),
-			     highmap_end_pfn())) {
+	    __cpa_pfn_in_highmap(cpa->pfn)) {
 		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
 					       __START_KERNEL_map - phys_base;
 		alias_cpa = *cpa;
--
cgit v1.2.3-59-g8ed1b

From a44ca8f5a30c008b54d07b00eed4eae7f169fcd0 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 20 Apr 2018 15:20:23 -0700
Subject: x86/pti: Reduce amount of kernel text allowed to be Global

commit abb67605203687c8b7943d760638d0301787f8d9

Kees reported to me that I made too much of the kernel image global.
It was far more than just text:

	I think this is too much set global: _end is after data, bss,
	and brk, and all kinds of other stuff that could hold secrets.
	I think this should match what mark_rodata_ro() is doing.

This does exactly that.
We use __end_rodata_hpage_align as our marker both because it is
huge-page-aligned and it does not contain any sections we expect to
hold secrets.

Kees's logic was that r/o data is in the kernel image anyway and, in
the case of traditional distributions, can be freely downloaded from
the web, so there's no reason to hide it.

Fixes: 8c06c7740 (x86/pti: Leave kernel text global for !PCID)
Reported-by: Kees Cook
Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Andrea Arcangeli
Cc: Juergen Gross
Cc: Josh Poimboeuf
Cc: Greg Kroah-Hartman
Cc: Peter Zijlstra
Cc: David Woodhouse
Cc: Hugh Dickins
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Borislav Petkov
Cc: Andy Lutomirski
Cc: Nadav Amit
Cc: Dan Williams
Cc: Arjan van de Ven
Link: https://lkml.kernel.org/r/20180420222023.1C8B2B20@viggo.jf.intel.com
---
 arch/x86/mm/pti.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index f1fd52f449e0..ae3eb4f5d53b 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -430,12 +430,24 @@ static inline bool pti_kernel_image_global_ok(void)
  */
 void pti_clone_kernel_text(void)
 {
+	/*
+	 * rodata is part of the kernel image and is normally
+	 * readable on the filesystem or on the web.  But, do not
+	 * clone the areas past rodata, they might contain secrets.
+	 */
 	unsigned long start = PFN_ALIGN(_text);
-	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+	unsigned long end = (unsigned long)__end_rodata_hpage_align;
 
 	if (!pti_kernel_image_global_ok())
 		return;
 
+	pr_debug("mapping partial kernel image into user address space\n");
+
+	/*
+	 * Note that this will undo _some_ of the work that
+	 * pti_set_kernel_image_nonglobal() did to clear the
+	 * global bit.
+	 */
 	pti_clone_pmds(start, end, _PAGE_RW);
 }
 
@@ -458,8 +470,6 @@ void pti_set_kernel_image_nonglobal(void)
 	if (pti_kernel_image_global_ok())
 		return;
 
-	pr_debug("set kernel image non-global\n");
-
 	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
 }
--
cgit v1.2.3-59-g8ed1b

From b7c21bc56fbedf4a61b628c6b11e0d7048746cc1 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 20 Apr 2018 15:20:26 -0700
Subject: x86/pti: Disallow global kernel text with RANDSTRUCT

commit 26d35ca6c3776784f8156e1d6f80cc60d9a2a915

RANDSTRUCT derives its hardening benefits from the attacker's lack of
knowledge about the layout of kernel data structures.  Keep the kernel
image non-global in cases where RANDSTRUCT is in use to help keep the
layout a secret.

Fixes: 8c06c7740 (x86/pti: Leave kernel text global for !PCID)
Reported-by: Kees Cook
Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Andrea Arcangeli
Cc: Juergen Gross
Cc: Josh Poimboeuf
Cc: Greg Kroah-Hartman
Cc: Peter Zijlstra
Cc: David Woodhouse
Cc: Hugh Dickins
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Borislav Petkov
Cc: Andy Lutomirski
Cc: Nadav Amit
Cc: Dan Williams
Cc: Arjan van de Ven
Cc: Vlastimil Babka
Link: https://lkml.kernel.org/r/20180420222026.D0B4AAC9@viggo.jf.intel.com
---
 arch/x86/mm/pti.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ae3eb4f5d53b..4d418e705878 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
 	if (boot_cpu_has(X86_FEATURE_K8))
 		return false;
 
+	/*
+	 * RANDSTRUCT derives its hardening benefits from the
+	 * attacker's lack of knowledge about the layout of kernel
+	 * data structures.  Keep the kernel image non-global in
+	 * cases where RANDSTRUCT is in use to help keep the layout a
+	 * secret.
+	 */
+	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
+		return false;
+
 	return true;
 }
--
cgit v1.2.3-59-g8ed1b

From 316d097c4cd4e7f2ef50c40cff2db266593c4ec4 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 20 Apr 2018 15:20:28 -0700
Subject: x86/pti: Filter at vma->vm_page_prot population

commit ce9962bf7e22bb3891655c349faff618922d4a73

0day reported warnings at boot on 32-bit systems without NX support:

  attempted to set unsupported pgprot: 8000000000000025 bits: 8000000000000000 supported: 7fffffffffffffff
  WARNING: CPU: 0 PID: 1 at arch/x86/include/asm/pgtable.h:540 handle_mm_fault+0xfc1/0xfe0:
   check_pgprot at arch/x86/include/asm/pgtable.h:535
   (inlined by) pfn_pte at arch/x86/include/asm/pgtable.h:549
   (inlined by) do_anonymous_page at mm/memory.c:3169
   (inlined by) handle_pte_fault at mm/memory.c:3961
   (inlined by) __handle_mm_fault at mm/memory.c:4087
   (inlined by) handle_mm_fault at mm/memory.c:4124

The problem is that due to the recent commit which removed
auto-massaging of page protections, filtering page permissions at PTE
creation time is no longer done, so vma->vm_page_prot is passed
unfiltered to PTE creation.

Filter the page protections before they are installed in
vma->vm_page_prot.

Fixes: fb43d6cb91 ("x86/mm: Do not auto-massage page protections")
Reported-by: Fengguang Wu
Signed-off-by: Dave Hansen
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Andrea Arcangeli
Cc: Juergen Gross
Cc: Kees Cook
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: David Woodhouse
Cc: Hugh Dickins
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Borislav Petkov
Cc: Andy Lutomirski
Cc: Greg Kroah-Hartman
Cc: Nadav Amit
Cc: Dan Williams
Cc: Arjan van de Ven
Link: https://lkml.kernel.org/r/20180420222028.99D72858@viggo.jf.intel.com
---
 arch/x86/Kconfig               |  4 ++++
 arch/x86/include/asm/pgtable.h |  5 +++++
 mm/mmap.c                      | 11 ++++++++++-
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 00fcf81f2c56..c07f492b871a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -52,6 +52,7 @@ config X86
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
+	select ARCH_HAS_FILTER_PGPROT
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV			if X86_64
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
+config ARCH_HAS_FILTER_PGPROT
+	def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5f49b4ff0c24..f1633de5a675 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+	return canon_pgprot(prot);
+}
+
 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 					 enum page_cache_mode pcm,
 					 enum page_cache_mode new_pcm)
diff --git a/mm/mmap.c b/mm/mmap.c
index 188f195883b9..9d5968d1e8e3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+	return prot;
+}
+#endif
+
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-	return __pgprot(pgprot_val(protection_map[vm_flags &
+	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
 				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
 			pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+	return arch_filter_pgprot(ret);
 }
 EXPORT_SYMBOL(vm_get_page_prot);
--
cgit v1.2.3-59-g8ed1b

From 8bb2610bc4967f19672444a7b0407367f1540028 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Tue, 17 Apr 2018 07:36:36 -0700
Subject: x86/entry/64/compat: Preserve r8-r11 in int $0x80

32-bit user code that uses int $0x80 doesn't care about r8-r11.  There
is, however, some 64-bit user code that intentionally uses int $0x80
to invoke 32-bit system calls.  From what I've seen, basically all
such code assumes that r8-r15 are all preserved, but the kernel
clobbers r8-r11.  Since I doubt that there's any code that depends on
int $0x80 zeroing r8-r11, change the kernel to preserve them.

I suspect that very little user code is broken by the old clobber,
since r8-r11 are only rarely allocated by gcc, and they're clobbered
by function calls, so the only way we'd see a problem is if the same
function that invokes int $0x80 also spills something important to one
of these registers.

The current behavior seems to date back to the historical commit
"[PATCH] x86-64 merge for 2.6.4".  Before that, all regs were
preserved.  I can't find any explanation of why this change was made.

Update the test_syscall_vdso_32 testcase as well to verify the new
behavior, and strengthen the test to make sure that the kernel doesn't
accidentally permute r8..r15.

Suggested-by: Denys Vlasenko
Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Cc: Borislav Petkov
Cc: Dominik Brodowski
Link: https://lkml.kernel.org/r/d4c4d9985fbe64f8c9e19291886453914b48caee.1523975710.git.luto@kernel.org
---
 arch/x86/entry/entry_64_compat.S                |  8 +++---
 tools/testing/selftests/x86/test_syscall_vdso.c | 35 +++++++++++++++----------
 2 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 9af927e59d49..9de7f1e1dede 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
 	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq	$0			/* pt_regs->r8  = 0 */
+	pushq	%r8			/* pt_regs->r8 */
 	xorl	%r8d, %r8d		/* nospec   r8 */
-	pushq	$0			/* pt_regs->r9  = 0 */
+	pushq	%r9			/* pt_regs->r9 */
 	xorl	%r9d, %r9d		/* nospec   r9 */
-	pushq	$0			/* pt_regs->r10 = 0 */
+	pushq	%r10			/* pt_regs->r10 */
 	xorl	%r10d, %r10d		/* nospec  r10 */
-	pushq	$0			/* pt_regs->r11 = 0 */
+	pushq	%r11			/* pt_regs->r11 */
 	xorl	%r11d, %r11d		/* nospec  r11 */
 	pushq	%rbx			/* pt_regs->rbx */
 	xorl	%ebx, %ebx		/* nospec  rbx */
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
index 40370354d4c1..c9c3281077bc 100644
--- a/tools/testing/selftests/x86/test_syscall_vdso.c
+++ b/tools/testing/selftests/x86/test_syscall_vdso.c
@@ -100,12 +100,19 @@ asm (
 	"	shl	$32, %r8\n"
 	"	orq	$0x7f7f7f7f, %r8\n"
 	"	movq	%r8, %r9\n"
-	"	movq	%r8, %r10\n"
-	"	movq	%r8, %r11\n"
-	"	movq	%r8, %r12\n"
-	"	movq	%r8, %r13\n"
-	"	movq	%r8, %r14\n"
-	"	movq	%r8, %r15\n"
+	"	incq	%r9\n"
+	"	movq	%r9, %r10\n"
+	"	incq	%r10\n"
+	"	movq	%r10, %r11\n"
+	"	incq	%r11\n"
+	"	movq	%r11, %r12\n"
+	"	incq	%r12\n"
+	"	movq	%r12, %r13\n"
+	"	incq	%r13\n"
+	"	movq	%r13, %r14\n"
+	"	incq	%r14\n"
+	"	movq	%r14, %r15\n"
+	"	incq	%r15\n"
 	"	ret\n"
 	"	.code32\n"
 	"	.popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
 	int err = 0;
 	int num = 8;
 	uint64_t *r64 = &regs64.r8;
+	uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
 
 	if (!kernel_is_64bit)
 		return 0;
 
 	do {
-		if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
+		if (*r64 == expected++)
 			continue;	/* register did not change */
 		if (syscall_addr != (long)&int80) {
 			/*
@@ -147,18 +155,17 @@ int check_regs64(void)
 				continue;
 			}
 		} else {
-			/* INT80 syscall entrypoint can be used by
+			/*
+			 * INT80 syscall entrypoint can be used by
 			 * 64-bit programs too, unlike SYSCALL/SYSENTER.
 			 * Therefore it must preserve R12+
 			 * (they are callee-saved registers in 64-bit C ABI).
 			 *
-			 * This was probably historically not intended,
-			 * but R8..11 are clobbered (cleared to 0).
-			 * IOW: they are the only registers which aren't
-			 * preserved across INT80 syscall.
+			 * Starting in Linux 4.17 (and any kernel that
+			 * backports the change), R8..11 are preserved.
+			 * Historically (and probably unintentionally), they
+			 * were clobbered or zeroed.
 			 */
-			if (*r64 == 0 && num <= 11)
-				continue;
 		}
 		printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
 		err++;
--
cgit v1.2.3-59-g8ed1b
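
A quick way to see the behavior the last patch (and its selftest) checks
for is the small, hypothetical user-space sketch below; it is not part
of the series above.  A 64-bit program issues the 32-bit getpid()
(syscall number 20 in the i386 table) via int $0x80 with a known value
in r8 and reports whether r8 survives.  It assumes an x86-64 kernel
built with CONFIG_IA32_EMULATION; on kernels without the
entry_64_compat.S change, r8-r11 come back zeroed and the program
prints "clobbered".

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t before = 0x1122334455667788ULL;
	uint64_t after;
	long ret;

	/*
	 * Load r8 with a known value, invoke the 32-bit syscall via
	 * int $0x80 (eax = 20 is __NR_getpid in the 32-bit table),
	 * then read r8 back.  r8-r11 are listed as clobbers because
	 * older kernels zero them on this path.
	 */
	asm volatile("movq	%[val], %%r8\n\t"
		     "int	$0x80\n\t"
		     "movq	%%r8, %[out]"
		     : "=a" (ret), [out] "=r" (after)
		     : "a" (20L), [val] "r" (before)
		     : "r8", "r9", "r10", "r11", "memory");

	printf("getpid() via int $0x80 returned %d\n", (int)ret);
	printf("r8 was %s\n", after == before ? "preserved" : "clobbered");
	return 0;
}

Build it with something like "gcc -O2 -o int80-regs int80-regs.c" on an
x86-64 box (the file name is just an example) and run it once on a
pre-4.17 kernel and once on a patched one to see the difference.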