From dd71a17b1193dd4a4c35ecd0ba227aac3d110836 Mon Sep 17 00:00:00 2001
From: Bryan O'Donoghue
Date: Tue, 23 Feb 2016 01:29:58 +0000
Subject: x86/platform/intel/quark: Change the kernel's IMR lock bit to false

Currently when setting up an IMR around the kernel's .text section we lock
that IMR, preventing further modification. While superficially this appears
to be the right thing to do, in fact this doesn't account for a legitimate
change in the memory map, such as when executing a new kernel via kexec.

In such a scenario a second kernel can have a different size and location
to its predecessor, and can view some of the memory occupied by its
predecessor as legitimately usable DMA RAM. If this RAM were then
subsequently allocated to DMA agents within the system, it could
conceivably trigger an IMR violation.

This patch fixes this potential situation by keeping the kernel's .text
section IMR lock bit false by default.

Suggested-by: Ingo Molnar
Reported-by: Andy Shevchenko
Signed-off-by: Bryan O'Donoghue
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: boon.leong.ong@intel.com
Cc: paul.gortmaker@windriver.com
Link: http://lkml.kernel.org/r/1456190999-12685-2-git-send-email-pure.logic@nexus-software.ie
Signed-off-by: Ingo Molnar
---
 arch/x86/platform/intel-quark/imr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c332e97..bfadcd0f4944 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	end = (unsigned long)__end_rodata - 1;

 	/*
-	 * Setup a locked IMR around the physical extent of the kernel
+	 * Setup an unlocked IMR around the physical extent of the kernel
 	 * from the beginning of the .text secton to the end of the
 	 * .rodata section as one physically contiguous block.
 	 *
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 		       size / 1024, start, end);
--
cgit v1.2.3-59-g8ed1b
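
For context on why the lock bit matters: an IMR created with lock=false can
later be torn down, which is exactly what a memory-map change such as kexec
needs. Below is a minimal, hypothetical sketch of that lifecycle, assuming
the Quark IMR API as exported from arch/x86/platform/intel-quark/imr.c at
this point in time (imr_add_range() and imr_remove_range()); the calling
context, base and size are invented for illustration, not part of the patch.

#include <asm/imr.h>

/*
 * Hypothetical lifecycle of an unlocked IMR: protect a region, use it,
 * then release it. Had the region been created with lock=true, the
 * imr_remove_range() call would fail -- the situation the patch above
 * avoids for the kernel's .text IMR.
 */
static int example_unlocked_imr(phys_addr_t base, size_t size)
{
	int ret;

	/* Read/write access restricted to the CPU; lock bit left false */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
	if (ret < 0)
		return ret;

	/* ... region is protected from DMA agents while in use ... */

	/* Possible only because the IMR was not locked at creation */
	return imr_remove_range(base, size);
}
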
From 04d1d281dcfe683a53cddfab8371fc8bb302b069 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Tue, 23 Feb 2016 13:19:29 -0800
Subject: x86/entry/32: Add an ASM_CLAC to entry_SYSENTER_32

Both before and after 5f310f739b4c ("x86/entry/32: Re-implement SYSENTER
using the new C path"), we relied on a uaccess very early in the SYSENTER
path to clear AC. After that change, though, we can potentially make it
all the way into C code with AC set, which enlarges the attack surface
for SMAP bypass by doing SYSENTER with AC set.

Strengthen the SMAP protection by adding the missing ASM_CLAC right at
the beginning.

Signed-off-by: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/3e36be110724896e32a4a1fe73bacb349d3cba94.1456262295.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_32.S | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 77d8c5112900..bb3e376d0f33 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@ sysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
+	ASM_CLAC			/* Clear AC after saving FLAGS */
 	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
--
cgit v1.2.3-59-g8ed1b

From 3d44d51bd339766f0178f0cf2e8d048b4a4872aa Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 24 Feb 2016 12:18:49 -0800
Subject: x86/entry/compat: Add missing CLAC to entry_INT80_32

This doesn't seem to fix a regression -- I don't think the CLAC was
ever there.

I double-checked in a debugger: entries through the int80 gate do not
automatically clear AC.

Stable maintainers: I can provide a backport to 4.3 and earlier if
needed.

This needs to be backported all the way to 3.10.

Reported-by: Brian Gerst
Signed-off-by: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: stable@vger.kernel.org # v3.10 and later
Fixes: 63bcff2a307b ("x86, smap: Add STAC and CLAC instructions to control user space access")
Link: http://lkml.kernel.org/r/b02b7e71ae54074be01fc171cbd4b72517055c0e.1456345086.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_64_compat.S | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d61f332..3c990eeee40b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
 	 * Interrupts are off on entry.
 	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS

 	/*
--
cgit v1.2.3-59-g8ed1b

From bf70e5513dfea29c3682e7eb3dbb45f0723bac09 Mon Sep 17 00:00:00 2001
From: Dexuan Cui
Date: Thu, 25 Feb 2016 01:58:12 -0800
Subject: x86/mm: Fix slow_virt_to_phys() for X86_PAE again

The fix from commit d1cd12108346 ("x86, pageattr: Prevent overflow in
slow_virt_to_phys() for X86_PAE") was unintentionally dropped by the
recent commit 34437e67a672 ("x86/mm: Fix slow_virt_to_phys() to handle
large PAT bit").

In addition, the variable 'phys_addr' was defined as "unsigned long" by
mistake -- it should be "phys_addr_t".

As a result, the Hyper-V network driver in a 32-bit PAE Linux guest is
broken once again.

Fixes: 34437e67a672 ("x86/mm: Fix slow_virt_to_phys() to handle large PAT bit")
Signed-off-by: Dexuan Cui
Reviewed-by: Toshi Kani
Cc: olaf@aepfle.de
Cc: gregkh@linuxfoundation.org
Cc: jasowang@redhat.com
Cc: driverdev-devel@linuxdriverproject.org
Cc: linux-mm@kvack.org
Cc: apw@canonical.com
Cc: Andrew Morton
Cc: K. Y. Srinivasan
Cc: Haiyang Zhang
Link: http://lkml.kernel.org/r/1456394292-9030-1-git-send-email-decui@microsoft.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/mm/pageattr.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814b0069..9cf96d82147a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	unsigned long phys_addr, offset;
+	phys_addr_t phys_addr;
+	unsigned long offset;
 	enum pg_level level;
 	pte_t *pte;

 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);

+	/*
+	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * make 32-PAE kernel work correctly.
+	 */
 	switch (level) {
 	case PG_LEVEL_1G:
-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PUD_PAGE_MASK;
 		break;
 	case PG_LEVEL_2M:
-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PMD_PAGE_MASK;
 		break;
 	default:
-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PAGE_MASK;
 	}
--
cgit v1.2.3-59-g8ed1b
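
Why the (phys_addr_t) casts are needed: on a 32-bit PAE kernel, unsigned
long is 32 bits wide while physical addresses can exceed 4 GiB, so shifting
a PFN left by PAGE_SHIFT in 32-bit arithmetic silently drops the high bits.
A standalone user-space sketch of the same arithmetic (the fixed-width
types and the example PFN are stand-ins for the kernel's, chosen purely
for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* PFN of a page just above 4 GiB: physical address 0x100001000 */
	uint32_t pfn = 0x100001;	/* models a 32-bit unsigned long */

	/* Shift done in 32-bit arithmetic: high bits are silently lost */
	uint32_t truncated = pfn << PAGE_SHIFT;

	/* Widen first (the (phys_addr_t) cast), then shift: correct */
	uint64_t correct = (uint64_t)pfn << PAGE_SHIFT;

	printf("truncated: 0x%08x\n", truncated);	/* 0x00001000 */
	printf("correct:   0x%llx\n",
	       (unsigned long long)correct);		/* 0x100001000 */
	return 0;
}
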
From 9bf148cb0812595bfdf5100bd2c07e9bec9c6ef5 Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Fri, 26 Feb 2016 18:55:31 +0000
Subject: x86/mpx: Fix off-by-one comparison with nr_registers

In the unlikely event that regno == nr_registers, we get an array overrun
on regoff because the invalid register check is currently off-by-one.
Fix this with a check that regno is >= nr_registers instead.

Detected with static analysis using CoverityScan.

Fixes: fcc7ffd67991 ("x86, mpx: Decode MPX instruction to get bound violation information")
Signed-off-by: Colin Ian King
Acked-by: Dave Hansen
Cc: Borislav Petkov
Cc: "Kirill A. Shutemov"
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1456512931-3388-1-git-send-email-colin.king@canonical.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/mm/mpx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 		break;
 	}

-	if (regno > nr_registers) {
+	if (regno >= nr_registers) {
 		WARN_ONCE(1, "decoded an instruction with an invalid register");
 		return -EINVAL;
 	}
--
cgit v1.2.3-59-g8ed1b
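
The underlying rule: a table with nr_registers entries has valid indices
0 through nr_registers - 1, so an index equal to the count must be
rejected, and only >= does that. A self-contained sketch mirroring the
shape of the check above (the array contents and function names here are
invented for illustration, not taken from mpx.c):

#include <stdio.h>

static const int regoff[] = { 0, 4, 8, 12 };	/* 4 entries: indices 0..3 */

static int get_offset(unsigned int regno)
{
	const unsigned int nr_registers = sizeof(regoff) / sizeof(regoff[0]);

	/*
	 * With the old test "regno > nr_registers", regno == 4 slipped
	 * through and read one element past the end of regoff[].
	 */
	if (regno >= nr_registers) {
		fprintf(stderr, "invalid register %u\n", regno);
		return -1;
	}
	return regoff[regno];
}

int main(void)
{
	printf("%d\n", get_offset(3));	/* 12: last valid index */
	printf("%d\n", get_offset(4));	/* -1: correctly rejected now */
	return 0;
}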