author		2019-08-07 18:53:28 +0000
committer	2019-08-07 18:53:28 +0000
commit		5c3fa5a3cf0f4823ae1dce174ce77d5435e353bc (patch)
tree		6a72cb8e836ea8768152bfadc33924bf4e383095
parent		Add codepatch_jmp(), like codepatch_call() but inserting a jmp instead of a call. (diff)
Mitigate CVE-2019-1125: block speculation past conditional jump to mis-skip
or mis-take swapgs in interrupt path and in trap/fault/exception path. The
latter is improved to have no conditionals around this when Meltdown mitigation
is in effect. Codepatch out the fences based on the description of CPU bugs
in the (well written) Linux commit message.
feedback from kettenis@
ok deraadt@
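
The policy this message summarizes — which fences survive on which CPUs — can be restated as a small C decision function. This is a sketch for orientation only: needs_fences() and its parameters are invented names, while cpu_meltdown, SMAP, and the CPTAG_* tags all appear in the diff below.

	/*
	 * Hypothetical restatement of the fence policy applied in
	 * replacemeltdown() below (sketch only):
	 *  - SMAP present and no Meltdown: a speculatively mis-skipped or
	 *    mis-taken swapgs faults on SMAP, so both lfences are NOPed.
	 *  - Meltdown mitigation active and swapgs not known to be
	 *    speculated over: the serializing %cr3 write covers the
	 *    user->kernel case, so only the mis-taken-swapgs fence is NOPed.
	 *  - Otherwise both fences stay in place.
	 */
	static void
	needs_fences(int cpu_meltdown, int has_smap, int swapgs_vuln,
	    int *fence_mis_taken, int *fence_no_safe_smap)
	{
		*fence_mis_taken = 1;		/* CPTAG_FENCE_SWAPGS_MIS_TAKEN site */
		*fence_no_safe_smap = 1;	/* CPTAG_FENCE_NO_SAFE_SMAP site */
		if (!cpu_meltdown && has_smap)
			*fence_mis_taken = *fence_no_safe_smap = 0;
		else if (cpu_meltdown && !swapgs_vuln)
			*fence_mis_taken = 0;
	}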
-rw-r--r--	sys/arch/amd64/amd64/cpu.c		| 60
-rw-r--r--	sys/arch/amd64/amd64/locore.S		|  4
-rw-r--r--	sys/arch/amd64/amd64/vector.S		| 20
-rw-r--r--	sys/arch/amd64/include/codepatch.h	| 22
-rw-r--r--	sys/arch/amd64/include/frameasm.h	|  4
5 files changed, 93 insertions, 17 deletions
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index 8d51216ba0b..aa1ca484652 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.c,v 1.137 2019/05/28 18:17:01 guenther Exp $	*/
+/*	$OpenBSD: cpu.c,v 1.138 2019/08/07 18:53:28 guenther Exp $	*/
 /*	$NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $	*/
 
 /*-
@@ -173,20 +173,66 @@ void
 replacemeltdown(void)
 {
 	static int replacedone = 0;
-	int s;
+	struct cpu_info *ci = &cpu_info_primary;
+	int swapgs_vuln = 0, s;
 
 	if (replacedone)
 		return;
 	replacedone = 1;
 
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+		int family = ci->ci_family;
+		int model = ci->ci_model;
+
+		swapgs_vuln = 1;
+		if (family == 0x6 &&
+		    (model == 0x37 || model == 0x4a || model == 0x4c ||
+		     model == 0x4d || model == 0x5a || model == 0x5d ||
+		     model == 0x6e || model == 0x65 || model == 0x75)) {
+			/* Silvermont, Airmont */
+			swapgs_vuln = 0;
+		} else if (family == 0x6 && (model == 0x85 || model == 0x57)) {
+			/* KnightsLanding */
+			swapgs_vuln = 0;
+		}
+	}
+
 	s = splhigh();
 	if (!cpu_meltdown)
 		codepatch_nop(CPTAG_MELTDOWN_NOP);
-	else if (pmap_use_pcid) {
-		extern long _pcid_set_reuse;
-		DPRINTF("%s: codepatching PCID use", __func__);
-		codepatch_replace(CPTAG_PCID_SET_REUSE, &_pcid_set_reuse,
-		    PCID_SET_REUSE_SIZE);
+	else {
+		extern long alltraps_kern_meltdown;
+
+		/* eliminate conditional branch in alltraps */
+		codepatch_jmp(CPTAG_MELTDOWN_ALLTRAPS, &alltraps_kern_meltdown);
+
+		/* enable reuse of PCID for U-K page tables */
+		if (pmap_use_pcid) {
+			extern long _pcid_set_reuse;
+			DPRINTF("%s: codepatching PCID use", __func__);
+			codepatch_replace(CPTAG_PCID_SET_REUSE,
+			    &_pcid_set_reuse, PCID_SET_REUSE_SIZE);
+		}
+	}
+
+	/*
+	 * CVE-2019-1125: if the CPU has SMAP and it's not vulnerable to
+	 * Meltdown, then it's protected both from speculatively mis-skipping
+	 * the swapgs during interrupts of userspace and from speculatively
+	 * mis-taking a swapgs during interrupts while already in the kernel
+	 * as the speculative path will fault from SMAP.  Warning: enabling
+	 * WRGSBASE would break this 'protection'.
+	 *
+	 * Otherwise, if the CPU's swapgs can't be speculated over and it
+	 * _is_ vulnerable to Meltdown then the %cr3 change will serialize
+	 * user->kern transitions, but we still need to mitigate the
+	 * already-in-kernel cases.
+	 */
+	if (!cpu_meltdown && (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
+		codepatch_nop(CPTAG_FENCE_NO_SAFE_SMAP);
+	} else if (!swapgs_vuln && cpu_meltdown) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
+	}
 	splx(s);
 }
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index 01cac6f47e6..5f30c9c48aa 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.S,v 1.118 2019/05/17 19:07:15 guenther Exp $	*/
+/*	$OpenBSD: locore.S,v 1.119 2019/08/07 18:53:28 guenther Exp $	*/
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
 
 /*
@@ -562,7 +562,7 @@ XUsyscall_meltdown:
 	 * (thank you, Intel), at which point we'll continue at the
 	 * "movq CPUVAR(KERN_RSP),%rax" after Xsyscall below.
 	 * In case the CPU speculates past the mov to cr3, we put a
-	 * retpoline-style pause-jmp-to-pause loop.
+	 * retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
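
The locore.S hunk only corrects the comment: the speculation barrier itself is the pause/lfence loop already emitted after the write to %cr3. A rough user-space analogue of that pattern — a sketch, not the kernel's code — looks like this:

	/*
	 * Illustrative analogue of the pause-lfence-jmp-to-pause loop named
	 * in the corrected comment: the loop is never reached architecturally,
	 * but any speculative execution that lands in it spins harmlessly
	 * until the mispredicted path is squashed.
	 */
	static inline void
	speculation_trap(void)
	{
		for (;;)
			__asm__ volatile("pause; lfence" ::: "memory");
	}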
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 4cbd92a28b4..d3c1524e06c 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vector.S,v 1.79 2019/01/20 00:53:08 mlarkin Exp $	*/
+/*	$OpenBSD: vector.S,v 1.80 2019/08/07 18:53:28 guenther Exp $	*/
 /*	$NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $	*/
 
 /*
@@ -141,6 +141,7 @@ calltrap_specstk:			# special stack path
 	.text
 	.globl	INTRENTRY_LABEL(calltrap_specstk)
 INTRENTRY_LABEL(calltrap_specstk):
+	lfence			# block speculation through jz above
 	cld
 	SMAP_CLAC
 	movq	%rsp,%rdi
@@ -183,6 +184,7 @@ IDTVEC(trap03)
 	.text
 	.global	INTRENTRY_LABEL(trap03)
 INTRENTRY_LABEL(trap03):
+	FENCE_NO_SAFE_SMAP
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -313,7 +315,8 @@ IDTVEC(trap0d)
 .Lhandle_doreti:
 	/* iretq faulted; resume in a stub that acts like we got a #GP */
 	leaq	.Lhandle_doreti_resume(%rip),%rcx
-1:	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
+1:	lfence		/* block speculation through conditionals above */
+	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
 	popq	%rcx
 	popq	%rdx
 	addq	$8,%rsp			/* pop the err code */
@@ -388,12 +391,13 @@ KUTEXT_PAGE_START
 	 * the kernel page tables (thank you, Intel) will make us
 	 * continue at the "movq CPUVAR(KERN_RSP),%rax" after alltraps
 	 * below.  In case the CPU speculates past the mov to cr3,
-	 * we put a retpoline-style pause-jmp-to-pause loop.
+	 * we put a retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 Xalltraps:
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
 	movq	CPUVAR(KERN_CR3),%rax
+	.byte	0x66, 0x90	/* space for FENCE_SWAPGS_MIS_TAKEN below */
 	movq	%rax,%cr3
 0:	pause
 	lfence
@@ -403,9 +407,12 @@ KUTEXT_PAGE_END
 KTEXT_PAGE_START
 	.align	NBPG, 0xcc
 GENTRY(alltraps)
+	CODEPATCH_START
 	testb	$SEL_RPL,24(%rsp)
 	je	alltraps_kern
 	swapgs
+	CODEPATCH_END(CPTAG_MELTDOWN_ALLTRAPS)
+	FENCE_SWAPGS_MIS_TAKEN
 	movq	%rax,CPUVAR(SCRATCH)
 	.space	(0b - Xalltraps) - (. - alltraps), 0x90
@@ -428,9 +435,15 @@ END(alltraps)
 
 /*
  * Traps from supervisor mode (kernel)
+ * If we're not mitigating Meltdown, then there's a conditional branch
+ * above and we may need a fence to mitigate CVE-2019-1125.  If we're
+ * doing Meltdown mitigation there's just an unconditional branch and
+ * we can skip the fence.
 */
 	_ALIGN_TRAPS
 GENTRY(alltraps_kern)
+	FENCE_NO_SAFE_SMAP
+GENTRY(alltraps_kern_meltdown)
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -467,6 +480,7 @@ spl_lowered:
 	.popsection
 #endif /* DIAGNOSTIC */
 END(alltraps_kern)
+END(alltraps_kern_meltdown)
 KTEXT_PAGE_END
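
In the alltraps hunk, the testb/je/swapgs window is bracketed by CODEPATCH_START/CODEPATCH_END under CPTAG_MELTDOWN_ALLTRAPS so that replacemeltdown() can rewrite it into an unconditional jmp to alltraps_kern_meltdown via the new codepatch_jmp(). A hedged C model of what such a patch plausibly writes — sketch_jmp_patch() is an invented name, and the NOP fill of the remainder is an assumption:

	#include <stdint.h>
	#include <string.h>

	/*
	 * Sketch of a rel32-jmp patch into a writable buffer standing in for
	 * the bracketed window (assumes len >= 5).  The real kernel routine
	 * must also unprotect read-only text and synchronize other CPUs.
	 */
	static void
	sketch_jmp_patch(uint8_t *window, size_t len, const uint8_t *target)
	{
		int32_t rel = (int32_t)(target - (window + 5));

		window[0] = 0xe9;			/* jmp rel32 opcode */
		memcpy(&window[1], &rel, sizeof(rel));
		memset(window + 5, 0x90, len - 5);	/* NOP out the rest */
	}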
diff --git a/sys/arch/amd64/include/codepatch.h b/sys/arch/amd64/include/codepatch.h
index 144f2179953..35465a58933 100644
--- a/sys/arch/amd64/include/codepatch.h
+++ b/sys/arch/amd64/include/codepatch.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: codepatch.h,v 1.10 2019/08/07 18:53:12 guenther Exp $	*/
+/*	$OpenBSD: codepatch.h,v 1.11 2019/08/07 18:53:28 guenther Exp $	*/
 /*
  * Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
  *
@@ -59,9 +59,12 @@ void codepatch_disable(void);
 #define CPTAG_XRSTOR		4
 #define CPTAG_XSAVE		5
 #define CPTAG_MELTDOWN_NOP	6
-#define CPTAG_PCID_SET_REUSE	7
-#define CPTAG_MDS		8
-#define CPTAG_MDS_VMM		9
+#define CPTAG_MELTDOWN_ALLTRAPS	7
+#define CPTAG_PCID_SET_REUSE	8
+#define CPTAG_MDS		9
+#define CPTAG_MDS_VMM		10
+#define CPTAG_FENCE_SWAPGS_MIS_TAKEN	11
+#define CPTAG_FENCE_NO_SAFE_SMAP	12
 
 /*
  * As stac/clac SMAP instructions are 3 bytes, we want the fastest
@@ -80,6 +83,17 @@ void codepatch_disable(void);
 	SMAP_NOP	;\
 	CODEPATCH_END(CPTAG_CLAC)
 
+/* CVE-2019-1125: block speculation after swapgs */
+#define	FENCE_SWAPGS_MIS_TAKEN \
+	CODEPATCH_START			; \
+	lfence				; \
+	CODEPATCH_END(CPTAG_FENCE_SWAPGS_MIS_TAKEN)
+/* block speculation when a correct SMAP impl would have been enough */
+#define	FENCE_NO_SAFE_SMAP \
+	CODEPATCH_START			; \
+	lfence				; \
+	CODEPATCH_END(CPTAG_FENCE_NO_SAFE_SMAP)
+
 #define PCID_SET_REUSE_SIZE	12
 #define PCID_SET_REUSE_NOP \
 	997:						;\
diff --git a/sys/arch/amd64/include/frameasm.h b/sys/arch/amd64/include/frameasm.h
index f22a71c901c..d82acc8500b 100644
--- a/sys/arch/amd64/include/frameasm.h
+++ b/sys/arch/amd64/include/frameasm.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: frameasm.h,v 1.21 2019/05/12 21:27:47 guenther Exp $	*/
+/*	$OpenBSD: frameasm.h,v 1.22 2019/08/07 18:53:28 guenther Exp $	*/
 /*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
@@ -63,6 +63,7 @@
 	testb	$SEL_RPL,24(%rsp)	; \
 	je	INTRENTRY_LABEL(label)	; \
 	swapgs				; \
+	FENCE_SWAPGS_MIS_TAKEN		; \
 	movq	%rax,CPUVAR(SCRATCH)	; \
 	CODEPATCH_START			; \
 	movq	CPUVAR(KERN_CR3),%rax	; \
@@ -73,6 +74,7 @@
 	_ALIGN_TRAPS			; \
 	.global	INTRENTRY_LABEL(label)	; \
 INTRENTRY_LABEL(label):	/* from kernel */ \
+	FENCE_NO_SAFE_SMAP		; \
 	INTR_ENTRY_KERN			; \
 	jmp	99f			; \
 	_ALIGN_TRAPS			; \
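
The FENCE_SWAPGS_MIS_TAKEN and FENCE_NO_SAFE_SMAP macros added in codepatch.h expand to an lfence bracketed by CODEPATCH_START/CODEPATCH_END, which records the instruction's location and tag so the patcher can later find every site carrying that tag. A simplified, self-contained model of that tag-driven NOP pass — the entry layout and names here are hypothetical, not the kernel's actual structures:

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical flattening of one recorded codepatch table entry. */
	struct patch_entry {
		uint8_t	 *addr;		/* start of the patch window */
		uint16_t  len;		/* window length in bytes */
		uint16_t  tag;		/* e.g. CPTAG_FENCE_NO_SAFE_SMAP */
	};

	/*
	 * Sketch of codepatch_nop(): NOP every window carrying the given
	 * tag.  The real routine also unlocks kernel text and prefers
	 * multi-byte NOPs over runs of 0x90.
	 */
	static void
	sketch_codepatch_nop(struct patch_entry *tbl, size_t n, uint16_t tag)
	{
		for (size_t i = 0; i < n; i++)
			if (tbl[i].tag == tag)
				memset(tbl[i].addr, 0x90, tbl[i].len);
	}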