author		Peter Zijlstra <peterz@infradead.org>	2022-06-14 23:16:03 +0200
committer	Borislav Petkov <bp@suse.de>	2022-06-27 10:34:00 +0200
commit		a09a6e2399ba0595c3042b3164f3ca68a3cff33e (patch)
tree		bf16062820b967a0b9c59169adf156eec8ed55a4 /arch/x86/include/asm
parent		x86/bugs: Do IBPB fallback check only once (diff)
objtool: Add entry UNRET validation
Since entry asm is tricky, add a validation pass that ensures the retbleed mitigation has been done before the first actual RET instruction.

Entry points are those that either have UNWIND_HINT_ENTRY, which acts as UNWIND_HINT_EMPTY but marks the instruction as an entry point, or those that have UNWIND_HINT_IRET_REGS at +0.

This is basically a variant of validate_branch() that is intra-function: it simply follows all branches from marked entry points and ensures that every path leads to ANNOTATE_UNRET_END. If a path hits a RET or an indirect jump or call first, the path fails and is reported.

There are 3 ANNOTATE_UNRET_END instances:

- UNTRAIN_RET itself
- exception from-kernel; this path doesn't need UNTRAIN_RET
- all early exceptions; these also don't need UNTRAIN_RET

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
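To make the rule concrete, here is a minimal, hypothetical entry stub that would satisfy the new validation pass; the symbol names (example_entry, example_c_handler) are invented for illustration and do not appear in this patch:

SYM_CODE_START(example_entry)
	UNWIND_HINT_ENTRY		/* marks this instruction as an objtool entry point */
	/* ... save registers, switch stacks ... */
	UNTRAIN_RET			/* contains ANNOTATE_UNRET_END: the walk from the entry point stops here */
	call	example_c_handler	/* everything after the untraining is outside the checked region */
	/* ... restore registers ... */
	RET				/* a RET reachable *before* UNTRAIN_RET would have been reported */
SYM_CODE_END(example_entry)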
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/nospec-branch.h	12
-rw-r--r--	arch/x86/include/asm/unwind_hints.h	4
2 files changed, 16 insertions, 0 deletions
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 05dd75478d7b..bba42bd78edf 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -82,6 +82,17 @@
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
/*
+ * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
+ * eventually turn into its own annotation.
+ */
+.macro ANNOTATE_UNRET_END
+#ifdef CONFIG_DEBUG_ENTRY
+ ANNOTATE_RETPOLINE_SAFE
+ nop
+#endif
+.endm
+
+/*
* JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
* indirect jmp/call which may be susceptible to the Spectre variant 2
* attack.
@@ -131,6 +142,7 @@
*/
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
+ ANNOTATE_UNRET_END
ALTERNATIVE_2 "", \
"call zen_untrain_ret", X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 8b33674288ea..6f70fe4c93f2 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -11,6 +11,10 @@
UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1
.endm
+.macro UNWIND_HINT_ENTRY
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_ENTRY end=1
+.endm
+
.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0
.if \base == %rsp
.if \indirect
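As the commit message notes, paths that provably need no untraining (exceptions arriving from kernel mode, the early exception handlers) are exempted by placing ANNOTATE_UNRET_END directly rather than via UNTRAIN_RET. Schematically, and again with an invented symbol name:

SYM_CODE_START(example_early_handler)
	UNWIND_HINT_IRET_REGS		/* an IRET_REGS hint at offset +0 also marks an entry point */
	ANNOTATE_UNRET_END		/* objtool stops here; no UNTRAIN_RET required on this path */
	/* ... */
SYM_CODE_END(example_early_handler)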