author     Balbir Singh <sblbir@amazon.com>        2021-04-26 21:59:11 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2021-07-28 11:42:24 +0200
commit     58e106e725eed59896b9141a1c9a917d2f67962a (patch)
tree       ed070e440c19cd31bd32342673065af6b48f636d
parent     x86/mm: Refactor cond_ibpb() to support other use cases (diff)
sched: Add task_work callback for paranoid L1D flush
The upcoming paranoid L1D flush infrastructure makes it possible to conditionally (opt-in) flush the L1D cache in switch_mm() as a defense against potential new side channels or for paranoia reasons. The flush only makes sense when a task runs on a non-SMT enabled core, because SMT siblings share the L1D cache, so the switch_mm() logic will kill a task which is flagged for L1D flush when it is found running on an SMT thread.

Add a task_work callback so switch_mm() can queue a SIGKILL which is delivered when the task tries to return to user space.

Signed-off-by: Balbir Singh <sblbir@amazon.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20210108121056.21940-1-sblbir@amazon.com
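For illustration only, a minimal sketch of how the new callback head could be wired up is shown below; it is not the architecture-side implementation, which is not part of this patch. The names l1d_flush_kill_fn() and l1d_flush_evaluate_sketch() are made up for this example, and sched_smt_active() is a coarse global stand-in for the per-core SMT check the real code would need.

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/task_work.h>

/* task_work callback: runs when the task heads back to user space. */
static void l1d_flush_kill_fn(struct callback_head *ch)
{
	force_sig(SIGKILL);
}

/*
 * Sketch of the switch_mm() side: if a task flagged for L1D flush turns
 * out to be running on an SMT thread, queue the kill work instead of
 * flushing, since the SMT sibling shares the L1D anyway.
 */
static void l1d_flush_evaluate_sketch(struct task_struct *next)
{
	if (!sched_smt_active())
		return;	/* No SMT siblings: flushing is meaningful, no kill. */

	init_task_work(&next->l1d_flush_kill, l1d_flush_kill_fn);
	task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
}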
-rw-r--r--  arch/Kconfig           |  3
-rw-r--r--  include/linux/sched.h  | 10
2 files changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 129df498a8e1..98db63496bab 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1282,6 +1282,9 @@ config ARCH_SPLIT_ARG64
 config ARCH_HAS_ELFCORE_COMPAT
 	bool
 
+config ARCH_HAS_PARANOID_L1D_FLUSH
+	bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ec8d07d88641..c048e59d3fbd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1400,6 +1400,16 @@ struct task_struct {
 	struct llist_head		kretprobe_instances;
 #endif
 
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+	/*
+	 * If L1D flush is supported on mm context switch
+	 * then we use this callback head to queue kill work
+	 * to kill tasks that are not running on SMT disabled
+	 * cores
+	 */
+	struct callback_head		l1d_flush_kill;
+#endif
+
 	/*
 	 * New fields for task_struct should be added above here, so that
 	 * they are included in the randomized portion of task_struct.
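A hedged usage note, not part of this patch: embedding the callback_head in task_struct under the config guard means nothing has to be allocated on the context-switch path, and the design implies at most one pending kill work per task. Any code touching the field must compile out when an architecture does not select ARCH_HAS_PARANOID_L1D_FLUSH; the helper queue_l1d_flush_kill() below is hypothetical.

#include <linux/sched.h>
#include <linux/task_work.h>

#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
static inline void queue_l1d_flush_kill(struct task_struct *tsk,
					task_work_func_t fn)
{
	/* Embedded callback_head: nothing to allocate in switch_mm(). */
	init_task_work(&tsk->l1d_flush_kill, fn);
	task_work_add(tsk, &tsk->l1d_flush_kill, TWA_RESUME);
}
#else
static inline void queue_l1d_flush_kill(struct task_struct *tsk,
					task_work_func_t fn)
{
}
#endif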