author    Will Deacon <will@kernel.org>  2020-05-28 18:03:40 +0100
committer Will Deacon <will@kernel.org>  2020-05-28 18:03:40 +0100
commit    082af5ec5080b028f7d0846a6c27cbb87f288205 (patch)
tree      9b34b36586c9453628849fc5acee655012cb7b1e /include/linux
parent    Merge branch 'for-next/kvm/errata' into for-next/core (diff)
parent    arm64: entry-ftrace.S: Update comment to indicate that x18 is live (diff)
download  wireguard-linux-082af5ec5080b028f7d0846a6c27cbb87f288205.tar.xz
          wireguard-linux-082af5ec5080b028f7d0846a6c27cbb87f288205.zip
Merge branch 'for-next/scs' into for-next/core
Support for Clang's Shadow Call Stack in the kernel
(Sami Tolvanen and Will Deacon)

* for-next/scs:
  arm64: entry-ftrace.S: Update comment to indicate that x18 is live
  scs: Move DEFINE_SCS macro into core code
  scs: Remove references to asm/scs.h from core code
  scs: Move scs_overflow_check() out of architecture code
  arm64: scs: Use 'scs_sp' register alias for x18
  scs: Move accounting into alloc/free functions
  arm64: scs: Store absolute SCS stack pointer value in thread_info
  efi/libstub: Disable Shadow Call Stack
  arm64: scs: Add shadow stacks for SDEI
  arm64: Implement Shadow Call Stack
  arm64: Disable SCS for hypervisor code
  arm64: vdso: Disable Shadow Call Stack
  arm64: efi: Restore register x18 if it was corrupted
  arm64: Preserve register x18 when CPU is suspended
  arm64: Reserve register x18 from general allocation with SCS
  scs: Disable when function graph tracing is enabled
  scs: Add support for stack usage debugging
  scs: Add page accounting for shadow call stack allocations
  scs: Add support for Clang's Shadow Call Stack (SCS)
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/compiler-clang.h  |  4
-rw-r--r--  include/linux/compiler_types.h  |  4
-rw-r--r--  include/linux/mmzone.h          |  3
-rw-r--r--  include/linux/scs.h             | 72
4 files changed, 83 insertions, 0 deletions
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 333a6695a918..790c0c6b8552 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -42,3 +42,7 @@
* compilers, like ICC.
*/
#define barrier() __asm__ __volatile__("" : : : "memory")
+
+#if __has_feature(shadow_call_stack)
+# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
+#endif
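
A minimal sketch of how the new __noscs attribute might be applied: a function that runs before the shadow stack is set up, or that deliberately clobbers x18, could opt out of instrumentation. The function below is invented purely for illustration.

#include <linux/compiler.h>

/*
 * Hypothetical example: exclude this function from shadow-call-stack
 * instrumentation because it must not rely on x18 holding a valid
 * shadow-stack pointer.
 */
static void __noscs early_console_putc(char c)
{
	(void)c;	/* placeholder body for the sketch */
}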
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index e970f97a7fcb..97b62f47a80d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -193,6 +193,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef __noscs
+# define __noscs
+#endif
+
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1b9de7d220fb..acffc3bc6178 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -156,6 +156,9 @@ enum zone_stat_item {
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
NR_PAGETABLE, /* used for pagetables */
NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_KB, /* measured in KiB */
+#endif
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
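
The new NR_KERNEL_SCS_KB item tracks shadow-call-stack memory in KiB alongside the existing kernel stack statistics. A hedged sketch of how an allocator could charge one shadow stack against this counter is shown below; the helper name is illustrative, it assumes the stack came from a slab/kmalloc allocation so virt_to_page() is valid, and the real accounting lives in kernel/scs.c.

#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmstat.h>

/*
 * Illustrative sketch only: charge (account = 1) or uncharge
 * (account = -1) one SCS_SIZE-byte shadow stack against the per-zone
 * NR_KERNEL_SCS_KB counter.
 */
static void scs_account(void *s, int account)
{
	mod_zone_page_state(page_zone(virt_to_page(s)), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}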
diff --git a/include/linux/scs.h b/include/linux/scs.h
new file mode 100644
index 000000000000..6dec390cf154
--- /dev/null
+++ b/include/linux/scs.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#ifndef _LINUX_SCS_H
+#define _LINUX_SCS_H
+
+#include <linux/gfp.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/*
+ * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
+ * architecture) provided ~40% safety margin on stack usage while keeping
+ * memory allocation overhead reasonable.
+ */
+#define SCS_SIZE SZ_1K
+#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
+
+/* An illegal pointer value to mark the end of the shadow stack. */
+#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
+
+/* Allocate a static per-CPU shadow stack */
+#define DEFINE_SCS(name) \
+ DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \
+
+#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
+#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp)
+
+void scs_init(void);
+int scs_prepare(struct task_struct *tsk, int node);
+void scs_release(struct task_struct *tsk);
+
+static inline void scs_task_reset(struct task_struct *tsk)
+{
+ /*
+ * Reset the shadow stack to the base address in case the task
+ * is reused.
+ */
+ task_scs_sp(tsk) = task_scs(tsk);
+}
+
+static inline unsigned long *__scs_magic(void *s)
+{
+ return (unsigned long *)(s + SCS_SIZE) - 1;
+}
+
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
+{
+ unsigned long *magic = __scs_magic(task_scs(tsk));
+ unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
+
+ return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
+}
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+static inline void scs_init(void) {}
+static inline void scs_task_reset(struct task_struct *tsk) {}
+static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
+static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#endif /* _LINUX_SCS_H */
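
Taken together, the header defines a small task-lifecycle API. The following is a hedged sketch of how core code might drive it; the function name is invented for illustration, and the real call sites are in the kernel's fork and exit paths.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/scs.h>

/*
 * Illustrative only: allocate, check and release a task's shadow stack.
 * With !CONFIG_SHADOW_CALL_STACK every call below compiles down to the
 * empty stubs defined above.
 */
static int example_scs_lifecycle(struct task_struct *tsk, int node)
{
	int ret;

	/* Allocate a shadow stack and point scs_base/scs_sp at it. */
	ret = scs_prepare(tsk, node);
	if (ret)
		return ret;

	/*
	 * ... the task runs; compiler-generated prologues and epilogues
	 * push and pop return addresses via the x18-based shadow stack ...
	 */

	/* Check the end marker before tearing the task down. */
	if (task_scs_end_corrupted(tsk))
		pr_err("%s: shadow stack corruption in %s\n", __func__, tsk->comm);

	/* Free the shadow stack once the task can no longer run. */
	scs_release(tsk);
	return 0;
}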