-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt   3
-rw-r--r--  arch/x86/entry/Makefile                            2
-rw-r--r--  arch/x86/entry/entry.S                            22
-rw-r--r--  arch/x86/include/asm/cpufeatures.h                 2
-rw-r--r--  arch/x86/include/asm/nospec-branch.h               8
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                        43
6 files changed, 67 insertions, 13 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0aeea69e9b7a..f2d26cb7e853 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5207,6 +5207,9 @@
disabling SMT if necessary for
the full mitigation (only on Zen1
and older without STIBP).
+ ibpb - mitigate short speculation windows on
+ basic block boundaries too. Safe, but has
+ the highest performance impact.
unret - force enable untrained return thunks,
only effective on AMD f15h-f17h
based systems.
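
For context beyond the patch itself: the new option is enabled by booting with retbleed=ibpb on the kernel command line, and the active mitigation can be verified afterwards by reading /sys/devices/system/cpu/vulnerabilities/retbleed, which reports "Mitigation: IBPB" (the retbleed_strings[] entry added in bugs.c below). The option is "safe" in the sense that it needs no special compiler support, but it issues a full indirect branch prediction barrier on every kernel entry, hence the performance cost.
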
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 7fec5dcf6438..eeadbd7d92cc 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -11,7 +11,7 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
CFLAGS_common.o += -fno-stack-protector
-obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
obj-y += vdso/
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
new file mode 100644
index 000000000000..bfb7bcb362bc
--- /dev/null
+++ b/arch/x86/entry/entry.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common place for both 32- and 64-bit entry routines.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/msr-index.h>
+
+.pushsection .noinstr.text, "ax"
+
+SYM_FUNC_START(entry_ibpb)
+ movl $MSR_IA32_PRED_CMD, %ecx
+ movl $PRED_CMD_IBPB, %eax
+ xorl %edx, %edx
+ wrmsr
+ RET
+SYM_FUNC_END(entry_ibpb)
+/* For KVM */
+EXPORT_SYMBOL_GPL(entry_ibpb);
+
+.popsection
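
For readers who prefer C, the routine above is the assembly twin of a single MSR write. The sketch below is illustrative only: example_ibpb() is a hypothetical name (the kernel's existing C-side helper is indirect_branch_prediction_barrier() in nospec-branch.h). The assembly version exists because entry code lives in .noinstr.text, where compiler instrumentation and the stack protector must be avoided:

    #include <asm/msr.h>        /* wrmsrl() */
    #include <asm/msr-index.h>  /* MSR_IA32_PRED_CMD, PRED_CMD_IBPB */

    /*
     * Illustrative C equivalent of entry_ibpb().  IA32_PRED_CMD is a
     * write-only "command" MSR: setting bit 0 (IBPB) flushes indirect
     * branch predictions, so no read-modify-write is needed.  WRMSR
     * takes the MSR index in ECX and the value in EDX:EAX, which is
     * exactly why the asm routine clobbers AX, CX and DX.
     */
    static inline void example_ibpb(void)	/* hypothetical name */
    {
    	wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
    }
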
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index b5bb35e8588e..09dce77f4848 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -296,7 +296,7 @@
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
-/* FREE! (11*32+10) */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
/* FREE! (11*32+11) */
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 455d79c6c2f3..05dd75478d7b 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -123,14 +123,17 @@
* return thunk isn't mapped into the userspace tables (then again, AMD
* typically has NO_MELTDOWN).
*
- * Doesn't clobber any registers but does require a stable stack.
+ * While zen_untrain_ret() doesn't clobber any registers (it only needs
+ * a stable stack), entry_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
*/
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
- ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET
+ ALTERNATIVE_2 "", \
+ "call zen_untrain_ret", X86_FEATURE_UNRET, \
+ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
@@ -147,6 +150,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
+extern void entry_ibpb(void);
#ifdef CONFIG_RETPOLINE
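
A conceptual model of the patched UNTRAIN_RET site may help. The sketch below is illustrative pseudo-C with a hypothetical name (untrain_ret_site()); in reality apply_alternatives() patches the chosen call directly into the instruction stream at boot, so no runtime conditional remains. Alternatives entries are applied in order, so a set X86_FEATURE_ENTRY_IBPB would take precedence over X86_FEATURE_UNRET, though the selection logic in bugs.c below only ever sets one of the two:

    /*
     * Illustrative pseudo-C only: models the effect of the
     * ALTERNATIVE_2 above after boot-time patching.
     */
    static void untrain_ret_site(void)	/* hypothetical */
    {
    	if (boot_cpu_has(X86_FEATURE_ENTRY_IBPB))
    		entry_ibpb();		/* full barrier; clobbers AX, CX, DX */
    	else if (boot_cpu_has(X86_FEATURE_UNRET))
    		zen_untrain_ret();	/* untrains the return predictor */
    	/* else: the empty original sequence (NOPs) */
    }
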
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 00e9c769ce0b..04077d13b3ae 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -805,6 +805,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
RETBLEED_MITIGATION_UNRET,
+ RETBLEED_MITIGATION_IBPB,
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
};
@@ -813,11 +814,13 @@ enum retbleed_mitigation_cmd {
RETBLEED_CMD_OFF,
RETBLEED_CMD_AUTO,
RETBLEED_CMD_UNRET,
+ RETBLEED_CMD_IBPB,
};
static const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
+ [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
};
@@ -847,6 +850,8 @@ static int __init retbleed_parse_cmdline(char *str)
retbleed_cmd = RETBLEED_CMD_AUTO;
} else if (!strcmp(str, "unret")) {
retbleed_cmd = RETBLEED_CMD_UNRET;
+ } else if (!strcmp(str, "ibpb")) {
+ retbleed_cmd = RETBLEED_CMD_IBPB;
} else if (!strcmp(str, "nosmt")) {
retbleed_nosmt = true;
} else {
@@ -861,11 +866,13 @@ static int __init retbleed_parse_cmdline(char *str)
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
-#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
+#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
{
+ bool mitigate_smt = false;
+
if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
return;
@@ -877,11 +884,21 @@ static void __init retbleed_select_mitigation(void)
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
break;
+ case RETBLEED_CMD_IBPB:
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ break;
+
case RETBLEED_CMD_AUTO:
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+
+ if (IS_ENABLED(CONFIG_RETPOLINE) &&
+ IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ else
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ }
/*
* The Intel mitigation (IBRS) was already selected in
@@ -897,26 +914,34 @@ static void __init retbleed_select_mitigation(void)
if (!IS_ENABLED(CONFIG_RETPOLINE) ||
!IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
pr_err(RETBLEED_COMPILER_MSG);
- retbleed_mitigation = RETBLEED_MITIGATION_NONE;
- break;
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ goto retbleed_force_ibpb;
}
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
- if (!boot_cpu_has(X86_FEATURE_STIBP) &&
- (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
- cpu_smt_disable(false);
-
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
pr_err(RETBLEED_UNTRAIN_MSG);
+
+ mitigate_smt = true;
+ break;
+
+ case RETBLEED_MITIGATION_IBPB:
+retbleed_force_ibpb:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ mitigate_smt = true;
break;
default:
break;
}
+ if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+ (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+
/*
* Let IBRS trump all on Intel without affecting the effects of the
* retbleed= cmdline option.
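
To summarize the selection logic, here is a condensed, illustrative model of the AUTO path; pick_auto_mitigation() is a hypothetical name, and the real function additionally handles the explicit retbleed= commands, the IBPB fallback label, and the SMT/IBRS interactions shown above:

    /*
     * Illustrative condensation of the RETBLEED_CMD_AUTO branch.
     * On AMD/Hygon the untrained return thunk is preferred, but it
     * needs both CONFIG_RETPOLINE and a -mfunction-return capable
     * compiler; entry IBPB works with any compiler.
     */
    static enum retbleed_mitigation pick_auto_mitigation(void)	/* hypothetical */
    {
    	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
    	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
    		return RETBLEED_MITIGATION_NONE;	/* Intel: IBRS path */

    	if (IS_ENABLED(CONFIG_RETPOLINE) &&
    	    IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK))
    		return RETBLEED_MITIGATION_UNRET;

    	return RETBLEED_MITIGATION_IBPB;
    }

Either choice only protects the thread that executes the untraining or the barrier, which is why mitigate_smt is set for both: without STIBP the HT sibling can still train the predictors, so SMT is disabled when retbleed=nosmt or mitigations=auto,nosmt was requested.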