From da64e9d1f8c3dad6898dac6edb39a68d3aa9ce93 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:11 -0700 Subject: arm64: Reserve register x18 from general allocation with SCS Reserve the x18 register from general allocation when SCS is enabled, because the compiler uses the register to store the current task's shadow stack pointer. Note that all external kernel modules must also be compiled with -ffixed-x18 if the kernel has SCS enabled. Signed-off-by: Sami Tolvanen Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/Makefile | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 85e4149cc5d5..409a6c1be8cc 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -81,6 +81,10 @@ endif KBUILD_CFLAGS += $(branch-prot-flags-y) +ifeq ($(CONFIG_SHADOW_CALL_STACK), y) +KBUILD_CFLAGS += -ffixed-x18 +endif + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ -- cgit v1.2.3-59-g8ed1b From 6d37d81f449a103a8b43c5c972b5055b8936ef0e Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:12 -0700 Subject: arm64: Preserve register x18 when CPU is suspended Don't lose the current task's shadow stack when the CPU is suspended. Signed-off-by: Sami Tolvanen Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Reviewed-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/suspend.h | 2 +- arch/arm64/mm/proc.S | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 8939c87c4dce..0cde2f473971 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -2,7 +2,7 @@ #ifndef __ASM_SUSPEND_H #define __ASM_SUSPEND_H -#define NR_CTX_REGS 12 +#define NR_CTX_REGS 13 #define NR_CALLEE_SAVED_REGS 12 /* diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 197a9ba2d5ea..ed15be0f8103 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -58,6 +58,8 @@ * cpu_do_suspend - save CPU registers context * * x0: virtual address of context pointer + + * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>. */ SYM_FUNC_START(cpu_do_suspend) mrs x2, tpidr_el0 @@ -82,6 +84,11 @@ alternative_endif stp x8, x9, [x0, #48] stp x10, x11, [x0, #64] stp x12, x13, [x0, #80] + /* + * Save x18 as it may be used as a platform register, e.g. by shadow + * call stack. + */ + str x18, [x0, #96] ret SYM_FUNC_END(cpu_do_suspend) @@ -98,6 +105,13 @@ SYM_FUNC_START(cpu_do_resume) ldp x9, x10, [x0, #48] ldp x11, x12, [x0, #64] ldp x13, x14, [x0, #80] + /* + * Restore x18, as it may be used as a platform register, and clear + * the buffer to minimize the risk of exposure when used for shadow + * call stack. + */ + ldr x18, [x0, #96] + str xzr, [x0, #96] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 -- cgit v1.2.3-59-g8ed1b From e73f02c6eb15729164b9dd5e19214b54446823ab Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:13 -0700 Subject: arm64: efi: Restore register x18 if it was corrupted If we detect a corrupted x18, restore the register before jumping back to potentially SCS instrumented code. This is safe, because the wrapper is called with preemption disabled and a separate shadow stack is used for interrupt handling.
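For context, the only C involved is the handler the wrapper tail-calls below; a minimal sketch of what it plausibly looks like (illustrative only, not part of this series: the function name is taken from the diff, the body is assumed):

    /* arch/arm64/kernel/efi.c (sketch, body assumed) */
    asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
    {
        /* Report the offending EFI runtime service, pass its status through. */
        WARN(1, "efi: %s: EFI runtime service clobbered x18\n", f);
        return s;
    }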
Signed-off-by: Sami Tolvanen Reviewed-by: Kees Cook Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/efi-rt-wrapper.S | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index 3fc71106cb2b..6ca6c0dc11a1 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -34,5 +34,14 @@ ENTRY(__efi_rt_asm_wrapper) ldp x29, x30, [sp], #32 b.ne 0f ret -0: b efi_handle_corrupted_x18 // tail call +0: + /* + * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a + * shadow stack pointer, which we need to restore before returning to + * potentially instrumented code. This is safe because the wrapper is + * called with preemption disabled and a separate shadow stack is used + * for interrupts. + */ + mov x18, x2 + b efi_handle_corrupted_x18 // tail call ENDPROC(__efi_rt_asm_wrapper) -- cgit v1.2.3-59-g8ed1b From cde5dec89e5dee5a6de12cd99fdb24651ee03146 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:14 -0700 Subject: arm64: vdso: Disable Shadow Call Stack Shadow stacks are only available in the kernel, so disable SCS instrumentation for the vDSO. Signed-off-by: Sami Tolvanen Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Reviewed-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index dd2514bb1511..a87a4f11724e 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -25,7 +25,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING VDSO_LDFLAGS := -Bsymbolic -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) KBUILD_CFLAGS += $(DISABLE_LTO) KASAN_SANITIZE := n UBSAN_SANITIZE := n -- cgit v1.2.3-59-g8ed1b From 9654736891c3ac6a60b52ce70d33cf57cf95bff7 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:15 -0700 Subject: arm64: Disable SCS for hypervisor code Disable SCS for code that runs at a different exception level by adding __noscs to __hyp_text. Suggested-by: James Morse Signed-off-by: Sami Tolvanen Reviewed-by: Kees Cook Acked-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/kvm_hyp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index fe57f60f06a8..875b106c5d98 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -13,7 +13,7 @@ #include #include -#define __hyp_text __section(.hyp.text) notrace +#define __hyp_text __section(.hyp.text) notrace __noscs #define read_sysreg_elx(r,nvh,vh) \ ({ \ -- cgit v1.2.3-59-g8ed1b From 5287569a790d2546a06db07e391bf84b8bd6cf51 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:16 -0700 Subject: arm64: Implement Shadow Call Stack This change implements shadow stack switching, initial SCS set-up, and interrupt shadow stacks for arm64. 
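In rough C terms, the scs_load/scs_save macros added below do the following (an illustrative model only; in the real code these are assembly macros and x18 is the register itself, not a variable):

    /* Sketch: load/save the shadow stack pointer across a context switch. */
    static void scs_load(struct task_struct *tsk)
    {
        x18 = (unsigned long)task_thread_info(tsk)->scs_base +
              task_thread_info(tsk)->scs_offset;
    }

    static void scs_save(struct task_struct *tsk)
    {
        task_thread_info(tsk)->scs_offset =
            x18 - (unsigned long)task_thread_info(tsk)->scs_base;
    }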
Signed-off-by: Sami Tolvanen Reviewed-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 5 ++++ arch/arm64/include/asm/scs.h | 46 ++++++++++++++++++++++++++++++++++++ arch/arm64/include/asm/thread_info.h | 13 ++++++++++ arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/asm-offsets.c | 4 ++++ arch/arm64/kernel/entry.S | 24 +++++++++++++++++-- arch/arm64/kernel/head.S | 6 +++++ arch/arm64/kernel/process.c | 2 ++ arch/arm64/kernel/scs.c | 15 ++++++++++++ 9 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/include/asm/scs.h create mode 100644 arch/arm64/kernel/scs.c (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 40fb05d96c60..c380a16533f6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -64,6 +64,7 @@ config ARM64 select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_SUPPORTS_MEMORY_FAILURE + select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) select ARCH_SUPPORTS_NUMA_BALANCING @@ -1025,6 +1026,10 @@ config ARCH_HAS_CACHE_LINE_SIZE config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y if PGTABLE_LEVELS > 2 +# Supported by clang >= 7.0 +config CC_HAVE_SHADOW_CALL_STACK + def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h new file mode 100644 index 000000000000..96549353b0cb --- /dev/null +++ b/arch/arm64/include/asm/scs.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SCS_H +#define _ASM_SCS_H + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_SHADOW_CALL_STACK + .macro scs_load tsk, tmp + ldp x18, \tmp, [\tsk, #TSK_TI_SCS_BASE] + add x18, x18, \tmp + .endm + + .macro scs_save tsk, tmp + ldr \tmp, [\tsk, #TSK_TI_SCS_BASE] + sub \tmp, x18, \tmp + str \tmp, [\tsk, #TSK_TI_SCS_OFFSET] + .endm +#else + .macro scs_load tsk, tmp + .endm + + .macro scs_save tsk, tmp + .endm +#endif /* CONFIG_SHADOW_CALL_STACK */ + +#else /* __ASSEMBLY__ */ + +#include + +#ifdef CONFIG_SHADOW_CALL_STACK + +static inline void scs_overflow_check(struct task_struct *tsk) +{ + if (unlikely(scs_corrupted(tsk))) + panic("corrupted shadow stack detected inside scheduler\n"); +} + +#else /* CONFIG_SHADOW_CALL_STACK */ + +static inline void scs_overflow_check(struct task_struct *tsk) {} + +#endif /* CONFIG_SHADOW_CALL_STACK */ + +#endif /* __ASSEMBLY __ */ + +#endif /* _ASM_SCS_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 512174a8e789..9df79c0a4c43 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -41,6 +41,10 @@ struct thread_info { #endif } preempt; }; +#ifdef CONFIG_SHADOW_CALL_STACK + void *scs_base; + unsigned long scs_offset; +#endif }; #define thread_saved_pc(tsk) \ @@ -100,11 +104,20 @@ void arch_release_task_struct(struct task_struct *tsk); _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) +#ifdef CONFIG_SHADOW_CALL_STACK +#define INIT_SCS \ + .scs_base = init_shadow_call_stack, \ + .scs_offset = 0, +#else +#define INIT_SCS +#endif + #define INIT_THREAD_INFO(tsk) \ { \ .flags = _TIF_FOREIGN_FPSTATE, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ + INIT_SCS \ } #endif /* __ASM_THREAD_INFO_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 
4e5b8ee31442..151f28521f1e 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o obj-$(CONFIG_ARM64_SSBD) += ssbd.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o obj-y += vdso/ probes/ obj-$(CONFIG_COMPAT_VDSO) += vdso32/ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 9981a0a5a87f..d7934250b68c 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -33,6 +33,10 @@ int main(void) DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); +#endif +#ifdef CONFIG_SHADOW_CALL_STACK + DEFINE(TSK_TI_SCS_BASE, offsetof(struct task_struct, thread_info.scs_base)); + DEFINE(TSK_TI_SCS_OFFSET, offsetof(struct task_struct, thread_info.scs_offset)); #endif DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); #ifdef CONFIG_STACKPROTECTOR diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ddcde093c433..244268d5ae47 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -179,6 +180,8 @@ alternative_cb_end apply_ssbd 1, x22, x23 ptrauth_keys_install_kernel tsk, 1, x20, x22, x23 + + scs_load tsk, x20 .else add x21, sp, #S_FRAME_SIZE get_current_task tsk @@ -343,6 +346,8 @@ alternative_else_nop_endif msr cntkctl_el1, x1 4: #endif + scs_save tsk, x0 + /* No kernel C function calls after this as user keys are set. */ ptrauth_keys_install_user tsk, x0, x1, x2 @@ -388,6 +393,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 .macro irq_stack_entry mov x19, sp // preserve the original sp +#ifdef CONFIG_SHADOW_CALL_STACK + mov x24, x18 // preserve the original shadow stack +#endif /* * Compare sp with the base of the task stack. @@ -405,15 +413,25 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 /* switch to the irq stack */ mov sp, x26 + +#ifdef CONFIG_SHADOW_CALL_STACK + /* also switch to the irq shadow stack */ + adr_this_cpu x18, irq_shadow_call_stack, x26 +#endif + 9998: .endm /* - * x19 should be preserved between irq_stack_entry and - * irq_stack_exit. + * The callee-saved regs (x19-x29) should be preserved between + * irq_stack_entry and irq_stack_exit, but note that kernel_entry + * uses x20-x23 to store data for later use. */ .macro irq_stack_exit mov sp, x19 +#ifdef CONFIG_SHADOW_CALL_STACK + mov x18, x24 +#endif .endm /* GPRs used by entry code */ @@ -901,6 +919,8 @@ SYM_FUNC_START(cpu_switch_to) mov sp, x9 msr sp_el0, x1 ptrauth_keys_install_kernel x1, 1, x8, x9, x10 + scs_save x0, x8 + scs_load x1, x8 ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 57a91032b4c2..2b01c19c5483 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -424,6 +425,10 @@ SYM_FUNC_START_LOCAL(__primary_switched) stp xzr, x30, [sp, #-16]! 
mov x29, sp +#ifdef CONFIG_SHADOW_CALL_STACK + adr_l x18, init_shadow_call_stack // Set shadow call stack +#endif + str_l x21, __fdt_pointer, x5 // Save FDT pointer ldr_l x4, kimage_vaddr // Save the offset between @@ -737,6 +742,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched) ldr x2, [x0, #CPU_BOOT_TASK] cbz x2, __secondary_too_slow msr sp_el0, x2 + scs_load x2, x3 mov x29, #0 mov x30, #0 b secondary_start_kernel diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 56be4cbf771f..a35d3318492c 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) @@ -515,6 +516,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); uao_thread_switch(next); ssbs_thread_switch(next); + scs_overflow_check(next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c new file mode 100644 index 000000000000..acc6741d1a40 --- /dev/null +++ b/arch/arm64/kernel/scs.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Shadow Call Stack support. + * + * Copyright (C) 2019 Google LLC + */ + +#include +#include + +/* Allocate a static per-CPU shadow stack */ +#define DEFINE_SCS(name) \ + DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ + +DEFINE_SCS(irq_shadow_call_stack); -- cgit v1.2.3-59-g8ed1b From 439dc2a11727314cdc3ad0ad13c122d910dae411 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 27 Apr 2020 09:00:17 -0700 Subject: arm64: scs: Add shadow stacks for SDEI This change adds per-CPU shadow call stacks for the SDEI handler. Similarly to how the kernel stacks are handled, we add separate shadow stacks for normal and critical events. Signed-off-by: Sami Tolvanen Reviewed-by: James Morse Tested-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 14 +++++++++++++- arch/arm64/kernel/scs.c | 5 +++++ 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 244268d5ae47..cb0516e6f963 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -1049,13 +1049,16 @@ SYM_CODE_START(__sdei_asm_handler) mov x19, x1 +#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK) + ldrb w4, [x19, #SDEI_EVENT_PRIORITY] +#endif + #ifdef CONFIG_VMAP_STACK /* * entry.S may have been using sp as a scratch register, find whether * this is a normal or critical event and switch to the appropriate * stack for this CPU. */ - ldrb w4, [x19, #SDEI_EVENT_PRIORITY] cbnz w4, 1f ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6 b 2f @@ -1065,6 +1068,15 @@ SYM_CODE_START(__sdei_asm_handler) mov sp, x5 #endif +#ifdef CONFIG_SHADOW_CALL_STACK + /* Use a separate shadow call stack for normal and critical events */ + cbnz w4, 3f + adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_normal, tmp=x6 + b 4f +3: adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_critical, tmp=x6 +4: +#endif + /* * We may have interrupted userspace, or a guest, or exit-from or * return-to either of these. We can't trust sp_el0, restore it. 
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c index acc6741d1a40..adc97f826fab 100644 --- a/arch/arm64/kernel/scs.c +++ b/arch/arm64/kernel/scs.c @@ -13,3 +13,8 @@ DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ DEFINE_SCS(irq_shadow_call_stack); + +#ifdef CONFIG_ARM_SDE_INTERFACE +DEFINE_SCS(sdei_shadow_call_stack_normal); +DEFINE_SCS(sdei_shadow_call_stack_critical); +#endif -- cgit v1.2.3-59-g8ed1b From 51189c7a7ed1b4ed4493e27275d466ff60406d3a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 May 2020 14:11:05 +0100 Subject: arm64: scs: Store absolute SCS stack pointer value in thread_info Storing the SCS information in thread_info as a {base,offset} pair introduces an additional load instruction on the ret-to-user path, since the SCS stack pointer in x18 has to be converted back to an offset by subtracting the base. Replace the offset with the absolute SCS stack pointer value instead and avoid the redundant load. Tested-by: Sami Tolvanen Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/scs.h | 9 ++++----- arch/arm64/include/asm/thread_info.h | 4 ++-- arch/arm64/kernel/asm-offsets.c | 2 +- include/linux/scs.h | 8 ++++---- kernel/scs.c | 3 +-- 5 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index 96549353b0cb..6b8cf4352fe3 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -4,16 +4,15 @@ #ifdef __ASSEMBLY__ +#include + #ifdef CONFIG_SHADOW_CALL_STACK .macro scs_load tsk, tmp - ldp x18, \tmp, [\tsk, #TSK_TI_SCS_BASE] - add x18, x18, \tmp + ldr x18, [\tsk, #TSK_TI_SCS_SP] .endm .macro scs_save tsk, tmp - ldr \tmp, [\tsk, #TSK_TI_SCS_BASE] - sub \tmp, x18, \tmp - str \tmp, [\tsk, #TSK_TI_SCS_OFFSET] + str x18, [\tsk, #TSK_TI_SCS_SP] .endm #else .macro scs_load tsk, tmp diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 9df79c0a4c43..6ea8b6a26ae9 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -43,7 +43,7 @@ struct thread_info { }; #ifdef CONFIG_SHADOW_CALL_STACK void *scs_base; - unsigned long scs_offset; + void *scs_sp; #endif }; @@ -107,7 +107,7 @@ void arch_release_task_struct(struct task_struct *tsk); #ifdef CONFIG_SHADOW_CALL_STACK #define INIT_SCS \ .scs_base = init_shadow_call_stack, \ - .scs_offset = 0, + .scs_sp = init_shadow_call_stack, #else #define INIT_SCS #endif diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index d7934250b68c..a098a45f63d8 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -36,7 +36,7 @@ int main(void) #endif #ifdef CONFIG_SHADOW_CALL_STACK DEFINE(TSK_TI_SCS_BASE, offsetof(struct task_struct, thread_info.scs_base)); - DEFINE(TSK_TI_SCS_OFFSET, offsetof(struct task_struct, thread_info.scs_offset)); + DEFINE(TSK_TI_SCS_SP, offsetof(struct task_struct, thread_info.scs_sp)); #endif DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); #ifdef CONFIG_STACKPROTECTOR diff --git a/include/linux/scs.h b/include/linux/scs.h index 3f3662621a27..0eb2485ef832 100644 --- a/include/linux/scs.h +++ b/include/linux/scs.h @@ -27,7 +27,7 @@ #define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA) #define task_scs(tsk) (task_thread_info(tsk)->scs_base) -#define task_scs_offset(tsk) (task_thread_info(tsk)->scs_offset) +#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp) void scs_init(void); int scs_prepare(struct 
task_struct *tsk, int node); @@ -39,7 +39,7 @@ static inline void scs_task_reset(struct task_struct *tsk) * Reset the shadow stack to the base address in case the task * is reused. */ - task_scs_offset(tsk) = 0; + task_scs_sp(tsk) = task_scs(tsk); } static inline unsigned long *__scs_magic(void *s) @@ -50,9 +50,9 @@ static inline unsigned long *__scs_magic(void *s) static inline bool scs_corrupted(struct task_struct *tsk) { unsigned long *magic = __scs_magic(task_scs(tsk)); + unsigned long sz = task_scs_sp(tsk) - task_scs(tsk); - return (task_scs_offset(tsk) >= SCS_SIZE - 1 || - READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC); + return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC; } #else /* CONFIG_SHADOW_CALL_STACK */ diff --git a/kernel/scs.c b/kernel/scs.c index 9389c28f0853..5ff8663e4a67 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -60,8 +60,7 @@ int scs_prepare(struct task_struct *tsk, int node) if (!s) return -ENOMEM; - task_scs(tsk) = s; - task_scs_offset(tsk) = 0; + task_scs(tsk) = task_scs_sp(tsk) = s; scs_account(tsk, 1); return 0; } -- cgit v1.2.3-59-g8ed1b From 711e8b0de0d63c70c825b473da01288b661a2386 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 May 2020 14:46:46 +0100 Subject: arm64: scs: Use 'scs_sp' register alias for x18 x18 holds the SCS stack pointer value, so introduce a register alias to make this easier to read in assembly code. Tested-by: Sami Tolvanen Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/scs.h | 6 ++++-- arch/arm64/kernel/entry.S | 10 +++++----- arch/arm64/kernel/head.S | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index 6b8cf4352fe3..d46efdd2060a 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -7,12 +7,14 @@ #include #ifdef CONFIG_SHADOW_CALL_STACK + scs_sp .req x18 + .macro scs_load tsk, tmp - ldr x18, [\tsk, #TSK_TI_SCS_SP] + ldr scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm .macro scs_save tsk, tmp - str x18, [\tsk, #TSK_TI_SCS_SP] + str scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm #else .macro scs_load tsk, tmp diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cb0516e6f963..741faf0706f1 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -394,7 +394,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 .macro irq_stack_entry mov x19, sp // preserve the original sp #ifdef CONFIG_SHADOW_CALL_STACK - mov x24, x18 // preserve the original shadow stack + mov x24, scs_sp // preserve the original shadow stack #endif /* @@ -416,7 +416,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_SHADOW_CALL_STACK /* also switch to the irq shadow stack */ - adr_this_cpu x18, irq_shadow_call_stack, x26 + adr_this_cpu scs_sp, irq_shadow_call_stack, x26 #endif 9998: @@ -430,7 +430,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 .macro irq_stack_exit mov sp, x19 #ifdef CONFIG_SHADOW_CALL_STACK - mov x18, x24 + mov scs_sp, x24 #endif .endm @@ -1071,9 +1071,9 @@ SYM_CODE_START(__sdei_asm_handler) #ifdef CONFIG_SHADOW_CALL_STACK /* Use a separate shadow call stack for normal and critical events */ cbnz w4, 3f - adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_normal, tmp=x6 + adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6 b 4f -3: adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_critical, tmp=x6 +3: adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6 4: #endif diff --git 
a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 2b01c19c5483..1293baddfd20 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -426,7 +426,7 @@ SYM_FUNC_START_LOCAL(__primary_switched) mov x29, sp #ifdef CONFIG_SHADOW_CALL_STACK - adr_l x18, init_shadow_call_stack // Set shadow call stack + adr_l scs_sp, init_shadow_call_stack // Set shadow call stack #endif str_l x21, __fdt_pointer, x5 // Save FDT pointer -- cgit v1.2.3-59-g8ed1b From 88485be531f4aee841ddc53b56e2f6e6a338854d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 May 2020 14:56:05 +0100 Subject: scs: Move scs_overflow_check() out of architecture code There is nothing architecture-specific about scs_overflow_check() as it's just a trivial wrapper around scs_corrupted(). For parity with task_stack_end_corrupted(), rename scs_corrupted() to task_scs_end_corrupted() and call it from schedule_debug() when CONFIG_SCHED_STACK_END_CHECK is enabled, which better reflects its purpose as a debug feature to catch inadvertent overflow of the SCS. Finally, remove the unused scs_overflow_check() function entirely. This has absolutely no impact on architectures that do not support SCS (currently arm64 only). Tested-by: Sami Tolvanen Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/scs.h | 18 ------------------ arch/arm64/kernel/process.c | 2 -- arch/arm64/kernel/scs.c | 2 +- include/linux/scs.h | 4 ++-- kernel/sched/core.c | 3 +++ kernel/scs.c | 3 ++- 6 files changed, 8 insertions(+), 24 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index d46efdd2060a..eaa2cd92e4c1 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -24,24 +24,6 @@ .endm #endif /* CONFIG_SHADOW_CALL_STACK */ -#else /* __ASSEMBLY__ */ - -#include - -#ifdef CONFIG_SHADOW_CALL_STACK - -static inline void scs_overflow_check(struct task_struct *tsk) -{ - if (unlikely(scs_corrupted(tsk))) - panic("corrupted shadow stack detected inside scheduler\n"); -} - -#else /* CONFIG_SHADOW_CALL_STACK */ - -static inline void scs_overflow_check(struct task_struct *tsk) {} - -#endif /* CONFIG_SHADOW_CALL_STACK */ - #endif /* __ASSEMBLY __ */ #endif /* _ASM_SCS_H */ diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index a35d3318492c..56be4cbf771f 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -52,7 +52,6 @@ #include #include #include -#include #include #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) @@ -516,7 +515,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); uao_thread_switch(next); ssbs_thread_switch(next); - scs_overflow_check(next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c index adc97f826fab..955875dff9e1 100644 --- a/arch/arm64/kernel/scs.c +++ b/arch/arm64/kernel/scs.c @@ -6,7 +6,7 @@ */ #include -#include +#include /* Allocate a static per-CPU shadow stack */ #define DEFINE_SCS(name) \ diff --git a/include/linux/scs.h b/include/linux/scs.h index 0eb2485ef832..2fd3df50e93e 100644 --- a/include/linux/scs.h +++ b/include/linux/scs.h @@ -47,7 +47,7 @@ static inline unsigned long *__scs_magic(void *s) return (unsigned long *)(s + SCS_SIZE) - 1; } -static inline bool scs_corrupted(struct task_struct *tsk) +static inline bool task_scs_end_corrupted(struct task_struct *tsk) { unsigned long *magic = 
__scs_magic(task_scs(tsk)); unsigned long sz = task_scs_sp(tsk) - task_scs(tsk); @@ -60,8 +60,8 @@ static inline bool scs_corrupted(struct task_struct *tsk) static inline void scs_init(void) {} static inline void scs_task_reset(struct task_struct *tsk) {} static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; } -static inline bool scs_corrupted(struct task_struct *tsk) { return false; } static inline void scs_release(struct task_struct *tsk) {} +static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; } #endif /* CONFIG_SHADOW_CALL_STACK */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 934e03cfaec7..a1d815a11b90 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3878,6 +3878,9 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt) #ifdef CONFIG_SCHED_STACK_END_CHECK if (task_stack_end_corrupted(prev)) panic("corrupted stack end detected inside scheduler\n"); + + if (task_scs_end_corrupted(prev)) + panic("corrupted shadow stack detected inside scheduler\n"); #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP diff --git a/kernel/scs.c b/kernel/scs.c index aea841cd7586..faf0ecd7b893 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -98,7 +98,8 @@ void scs_release(struct task_struct *tsk) if (!s) return; - WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n"); + WARN(task_scs_end_corrupted(tsk), + "corrupted shadow stack detected when freeing task\n"); scs_check_usage(tsk); scs_free(s); } -- cgit v1.2.3-59-g8ed1b From 871e100e432c651c9c46fb9c3184b4577e0de3ae Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 May 2020 16:17:12 +0100 Subject: scs: Move DEFINE_SCS macro into core code Defining static shadow call stacks is not architecture-specific, so move the DEFINE_SCS() macro into the core header file. Tested-by: Sami Tolvanen Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/scs.c | 4 ---- include/linux/scs.h | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c index 955875dff9e1..e8f7ff45dd8f 100644 --- a/arch/arm64/kernel/scs.c +++ b/arch/arm64/kernel/scs.c @@ -8,10 +8,6 @@ #include #include -/* Allocate a static per-CPU shadow stack */ -#define DEFINE_SCS(name) \ - DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ - DEFINE_SCS(irq_shadow_call_stack); #ifdef CONFIG_ARM_SDE_INTERFACE diff --git a/include/linux/scs.h b/include/linux/scs.h index 2fd3df50e93e..6dec390cf154 100644 --- a/include/linux/scs.h +++ b/include/linux/scs.h @@ -26,6 +26,10 @@ /* An illegal pointer value to mark the end of the shadow stack. */ #define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA) +/* Allocate a static per-CPU shadow stack */ +#define DEFINE_SCS(name) \ + DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ + #define task_scs(tsk) (task_thread_info(tsk)->scs_base) #define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp) -- cgit v1.2.3-59-g8ed1b From 258c3d628fe9e7512d98a0000709773457c66ef1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 18 May 2020 14:01:01 +0100 Subject: arm64: entry-ftrace.S: Update comment to indicate that x18 is live The Shadow Call Stack pointer is held in x18, so update the ftrace entry comment to indicate that it cannot be safely clobbered. 
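For example, inline assembly or a trampoline that borrowed x18 as a scratch register would silently corrupt the current shadow call stack; with SCS enabled, something like this is unsafe (illustrative):

    /* BAD under CONFIG_SHADOW_CALL_STACK: x18 is the live SCS pointer. */
    asm volatile("mov x18, %0" :: "r"(0UL));

x9-x17 remain safe scratch registers, as the updated comment below notes.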
Reported-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-ftrace.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 833d48c9acb5..a338f40e64d3 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -23,8 +23,9 @@ * * ... where is either ftrace_caller or ftrace_regs_caller. * - * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are - * live, and x9-x18 are safe to clobber. + * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are + * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to + * clobber. * * We save the callsite's context into a pt_regs before invoking any ftrace * callbacks. So that we can get a sensible backtrace, we create a stack record -- cgit v1.2.3-59-g8ed1b
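As a closing illustration of the mechanism this series relies on, a rough C model of a function built with -fsanitize=shadow-call-stack (assumed names: scs_sp models register x18 and lr the link register; the real instrumentation is compiler-generated arm64 assembly, not C):

    void instrumented(void)
    {
        *scs_sp++ = lr;    /* prologue: also save the return address on the shadow stack */
        /* ... body: an overflow can smash the LR saved on the regular stack,
         * but not the copy addressed only through x18 ... */
        lr = *--scs_sp;    /* epilogue: reload the return address before returning */
    }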