path: root/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-21 19:32:04 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-21 19:32:04 -0700
commit	2142b7f0c6bbe1f9515ce3383de9f7a32a5a025b (patch)
tree	e1c28d1fc2cf8a905254b6f4475a4e65dfddce82 /include
parent	Merge tag 'pstore-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux (diff)
parent	arm64: Add gcc Shadow Call Stack support (diff)
Merge tag 'hardening-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:

 - Add arm64 Shadow Call Stack support for GCC 12 (Dan Li)

 - Avoid memset with stack offset randomization under Clang (Marco Elver)

 - Clean up stackleak plugin to play nice with .noinstr (Kees Cook)

 - Check stack depth for greater usercopy hardening coverage (Kees Cook)

* tag 'hardening-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  arm64: Add gcc Shadow Call Stack support
  m68k: Implement "current_stack_pointer"
  xtensa: Implement "current_stack_pointer"
  usercopy: Check valid lifetime via stack depth
  stack: Constrain and fix stack offset randomization with Clang builds
  stack: Introduce CONFIG_RANDOMIZE_KSTACK_OFFSET
  gcc-plugins/stackleak: Ignore .noinstr.text and .entry.text
  gcc-plugins/stackleak: Exactly match strings instead of prefixes
  gcc-plugins/stackleak: Provide verbose mode
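As a purely illustrative aside (not part of the pull request), the core idea behind the stack offset randomization series can be sketched in a few lines of user-space C: each "entry" alloca()s a small, masked random offset so that everything called afterwards lands at a slightly different stack depth. All names here (DEMO_OFFSET_MASK, demo_entry, ...) are invented for the example; the 0x3FF mask mirrors the 10-bit cap discussed in the diff below. Built with something like "gcc -O1 demo.c", successive iterations print slightly different frame addresses within one run.

#include <alloca.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical 10-bit cap, mirroring the masking described below. */
#define DEMO_OFFSET_MASK 0x3FF

static void show_frame_address(void)
{
	int probe;

	printf("frame at %p\n", (void *)&probe);
}

/* Each "entry" consumes a small random amount of stack before doing work. */
static void demo_entry(uint32_t entropy)
{
	char *ptr = alloca(entropy & DEMO_OFFSET_MASK);

	/* Same trick as the kernel macro: keep the allocation alive even
	 * though "ptr" is otherwise unused. */
	__asm__ __volatile__("" :: "r"(ptr) : "memory");
	show_frame_address();
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		demo_entry((uint32_t)rand());
	return 0;
}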
Diffstat (limited to 'include')
-rw-r--r--	include/linux/compiler-gcc.h	4
-rw-r--r--	include/linux/randomize_kstack.h	21
2 files changed, 23 insertions, 2 deletions
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index ccbbd31b3aae..deff5b308470 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -97,6 +97,10 @@
 #define KASAN_ABI_VERSION 4
 #endif
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
+#endif
+
 #if __has_attribute(__no_sanitize_address__)
 #define __no_sanitize_address __attribute__((no_sanitize_address))
 #else
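The hunk above gives GCC 12 builds a definition of __noscs matching what Clang builds already use. Below is a brief, hypothetical sketch (not from this patch) of how such an annotation is consumed: a function that runs before the shadow call stack exists, or that deliberately switches stacks, opts out of the instrumentation. The function name is invented, and the empty fallback follows the kernel's usual pattern for optional attributes.

/* Illustrative only -- not part of this patch. */
#ifdef CONFIG_SHADOW_CALL_STACK
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#else
# define __noscs	/* no-op when the feature is disabled */
#endif

/*
 * Hypothetical example: early code that runs before the shadow call
 * stack is set up (or that switches stacks) must not be instrumented,
 * so it is marked __noscs.
 */
void __noscs early_stack_switch(void)
{
	/* No shadow-call-stack push/pop is emitted for this function. */
}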
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index bebc911161b6..1468caf001c0 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_RANDOMIZE_KSTACK_H
 #define _LINUX_RANDOMIZE_KSTACK_H
 
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
 #include <linux/kernel.h>
 #include <linux/jump_label.h>
 #include <linux/percpu-defs.h>
@@ -16,8 +17,20 @@ DECLARE_PER_CPU(u32, kstack_offset);
  * alignment. Also, since this use is being explicitly masked to a max of
  * 10 bits, stack-clash style attacks are unlikely. For more details see
  * "VLAs" in Documentation/process/deprecated.rst
+ *
+ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
+ * only with Clang and not GCC). Initializing the unused area on each syscall
+ * entry is expensive, and generating an implicit call to memset() may also be
+ * problematic (such as in noinstr functions). Therefore, if the compiler
+ * supports it (which it should if it initializes allocas), always use the
+ * "uninitialized" variant of the builtin.
  */
-void *__builtin_alloca(size_t size);
+#if __has_builtin(__builtin_alloca_uninitialized)
+#define __kstack_alloca __builtin_alloca_uninitialized
+#else
+#define __kstack_alloca __builtin_alloca
+#endif
+
 /*
  * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
  * "VLA" from being unbounded (see above). 10 bits leaves enough room for
@@ -36,7 +49,7 @@ void *__builtin_alloca(size_t size);
 	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
 				&randomize_kstack_offset)) { \
 		u32 offset = raw_cpu_read(kstack_offset); \
-		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \
+		u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
 		/* Keep allocation even after "ptr" loses scope. */ \
 		asm volatile("" :: "r"(ptr) : "memory"); \
 	} \
@@ -50,5 +63,9 @@ void *__builtin_alloca(size_t size);
 		raw_cpu_write(kstack_offset, offset); \
 	} \
 } while (0)
+#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+#define add_random_kstack_offset() do { } while (0)
+#define choose_random_kstack_offset(rand) do { } while (0)
+#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
 
 #endif
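Finally, a hedged sketch (not taken from this patch) of how an architecture's syscall path typically consumes the two macros above. The function name is a placeholder, and so is the entropy source: real callers pass in whatever cheap per-arch value is available (for example a cycle counter).

#include <linux/randomize_kstack.h>
#include <linux/random.h>

/* Hypothetical arch entry helper -- illustrative shape only. */
static void example_invoke_syscall(void)
{
	/*
	 * Apply the offset chosen at the end of the previous syscall;
	 * everything placed on the stack after this point shifts by up
	 * to KSTACK_OFFSET_MAX() bytes.
	 */
	add_random_kstack_offset();

	/* ... look up and call the actual syscall handler here ... */

	/*
	 * Only after the syscall has run, pick the offset that the next
	 * syscall will use, so the value is not observable while the
	 * current (attacker-controlled) syscall executes.
	 */
	choose_random_kstack_offset(get_random_u32());
}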