Diffstat (limited to 'arch/arm/kernel/traps.c')
-rw-r--r--  arch/arm/kernel/traps.c  178
1 file changed, 157 insertions(+), 21 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 195dff58bafc..20b2db6dcd1c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,11 +30,13 @@
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
+#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
+#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>
@@ -60,19 +62,32 @@ static int __init user_debug_setup(char *str)
__setup("user_debug=", user_debug_setup);
#endif
-static void dump_mem(const char *, const char *, unsigned long, unsigned long);
-
void dump_backtrace_entry(unsigned long where, unsigned long from,
unsigned long frame, const char *loglvl)
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
-#ifdef CONFIG_KALLSYMS
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+ IS_ENABLED(CONFIG_CC_IS_GCC) &&
+ end > ALIGN(frame, THREAD_SIZE)) {
+ /*
+ * If we are walking past the end of the stack, it may be due
+ * to the fact that we are on an IRQ or overflow stack. In this
+ * case, we can load the address of the other stack from the
+ * frame record.
+ */
+ frame = ((unsigned long *)frame)[-2] - 4;
+ end = frame + 4 + sizeof(struct pt_regs);
+ }
+
+#ifndef CONFIG_KALLSYMS
+ printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
+ loglvl, where, from);
+#elif defined CONFIG_BACKTRACE_VERBOSE
printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
loglvl, where, (void *)where, from, (void *)from);
#else
- printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
- loglvl, where, from);
+ printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
#endif
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
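
Note: the new fixup above only fires for GCC builds using the frame-pointer unwinder, where the compiler lays down an APCS-style frame record. A hedged sketch of the arithmetic, assuming that record layout (this is my reading of the code, not spelled out in the patch, and the helper name is hypothetical):

/*
 * Illustrative only, assuming the frame record GCC emits with frame
 * pointers enabled: fp[0] = pc, fp[-1] = lr, fp[-2] = caller's sp,
 * fp[-3] = caller's fp.  When the walk has crossed onto an IRQ or
 * overflow stack, the saved sp still points into the stack the
 * exception came from, so the walk can continue there.
 */
static unsigned long frame_on_previous_stack(unsigned long frame)
{
	unsigned long caller_sp = ((unsigned long *)frame)[-2];

	return caller_sp - 4;	/* mirrors the "- 4" adjustment above */
}
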
@@ -108,7 +123,8 @@ void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
static int verify_stack(unsigned long sp)
{
if (sp < PAGE_OFFSET ||
- (sp > (unsigned long)high_memory && high_memory != NULL))
+ (!IS_ENABLED(CONFIG_VMAP_STACK) &&
+ sp > (unsigned long)high_memory && high_memory != NULL))
return -EFAULT;
return 0;
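
Note: verify_stack() now tolerates stack pointers above high_memory when CONFIG_VMAP_STACK is enabled, because vmap'ed kernel stacks are allocated from the vmalloc area, which lies above the linear map. A minimal standalone sketch of the same check (helper name is mine, not the kernel's):

static bool sp_plausible(unsigned long sp)
{
	if (sp < PAGE_OFFSET)
		return false;		/* below lowmem: never a kernel stack */
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		return true;		/* may be a vmalloc-area stack */
	return !high_memory || sp <= (unsigned long)high_memory;
}
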
@@ -118,8 +134,8 @@ static int verify_stack(unsigned long sp)
/*
* Dump out the contents of some memory nicely...
*/
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
+void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top)
{
unsigned long first;
int i;
@@ -189,14 +205,14 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
#ifdef CONFIG_ARM_UNWIND
-static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
- const char *loglvl)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
unwind_backtrace(regs, tsk, loglvl);
}
#else
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
- const char *loglvl)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
unsigned int fp, mode;
int ok = 1;
@@ -278,7 +294,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
+ + THREAD_SIZE);
dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
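
Note: with IRQ and overflow stacks in play, regs->ARM_sp is no longer guaranteed to point into the task's stack page, so the dump range is now derived from ARM_sp itself. A sketch of what the expression computes, assuming the stack containing sp is THREAD_ALIGN-aligned (the helper name is mine):

/* top of the THREAD_ALIGN-aligned stack that sp currently points into */
static unsigned long stack_top_of(unsigned long sp)
{
	return ALIGN(sp - THREAD_SIZE, THREAD_ALIGN) + THREAD_SIZE;
}
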
@@ -333,7 +350,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
- do_exit(signr);
+ make_task_dead(signr);
}
/*
@@ -470,7 +487,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
- pr_info("%s (%d): undefined instruction: pc=%p\n",
+ pr_info("%s (%d): undefined instruction: pc=%px\n",
current->comm, task_pid_nr(current), pc);
__show_regs(regs);
dump_instr(KERN_INFO, regs);
@@ -574,7 +591,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end < start || flags)
return -EINVAL;
- if (!access_ok(start, end - start))
+ if (!access_ok((void __user *)start, end - start))
return -EFAULT;
return __do_cache_op(start, end);
@@ -787,10 +804,59 @@ static inline void __init kuser_init(void *vectors)
}
#endif
+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+ memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+ unsigned long start = (unsigned long)vma + offset;
+ unsigned long end = start + size;
+
+ flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+ extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+ extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+ void *vec_start, *vec_end;
+
+ if (system_state >= SYSTEM_FREEING_INITMEM) {
+ pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+ smp_processor_id());
+ return SPECTRE_VULNERABLE;
+ }
+
+ switch (method) {
+ case SPECTRE_V2_METHOD_LOOP8:
+ vec_start = __vectors_bhb_loop8_start;
+ vec_end = __vectors_bhb_loop8_end;
+ break;
+
+ case SPECTRE_V2_METHOD_BPIALL:
+ vec_start = __vectors_bhb_bpiall_start;
+ vec_end = __vectors_bhb_bpiall_end;
+ break;
+
+ default:
+ pr_err("CPU%u: unknown Spectre BHB state %d\n",
+ smp_processor_id(), method);
+ return SPECTRE_VULNERABLE;
+ }
+
+ copy_from_lma(vectors_page, vec_start, vec_end);
+ flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+ return SPECTRE_MITIGATED;
+}
+#endif
+
void __init early_trap_init(void *vectors_base)
{
-#ifndef CONFIG_CPU_V7M
- unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
unsigned i;
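
Note: spectre_bhb_update_vectors() installs one of the BHB-hardened alternative vector tables over the live one and flushes the icache for it. A hedged sketch of how a caller might use it (hypothetical function name; the real call site lives in the ARM Spectre mitigation code, outside this file):

static int example_install_bhb_workaround(unsigned int method)
{
	/* method: SPECTRE_V2_METHOD_LOOP8 or SPECTRE_V2_METHOD_BPIALL */
	if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
		return SPECTRE_VULNERABLE;

	/* per-CPU mitigation state / reporting would be updated here */
	return SPECTRE_MITIGATED;
}
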
@@ -811,17 +877,87 @@ void __init early_trap_init(void *vectors_base)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
- memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+ copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+ copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
#else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
/*
* on V7-M there is no need to copy the vector table to a dedicated
* memory area. The address is configurable and so a table in the kernel
* image can be used.
*/
+}
+#endif
+
+#ifdef CONFIG_VMAP_STACK
+
+DECLARE_PER_CPU(u8 *, irq_stack_ptr);
+
+asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr);
+
+static int __init allocate_overflow_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stack = (u8 *)__get_free_page(GFP_KERNEL);
+ if (WARN_ON(!stack))
+ return -ENOMEM;
+ per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
+ }
+ return 0;
+}
+early_initcall(allocate_overflow_stacks);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+#ifdef CONFIG_IRQSTACKS
+ unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr);
+#endif
+ unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr);
+
+ console_verbose();
+ pr_emerg("Insufficient stack space to handle exception!");
+
+ pr_emerg("Task stack: [0x%08lx..0x%08lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+#ifdef CONFIG_IRQSTACKS
+ pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n",
+ irq_stk - THREAD_SIZE, irq_stk);
#endif
+ pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
+ ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);
+
+ die("kernel stack overflow", regs, 0);
}
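
Note: handle_bad_stack() prints the three candidate stack ranges so the oops shows which one sp fell off. Purely to illustrate those ranges (helper is mine, not in the patch):

static const char *which_stack(unsigned long sp)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr);

	if (sp >= tsk_stk && sp < tsk_stk + THREAD_SIZE)
		return "task";
#ifdef CONFIG_IRQSTACKS
	{
		unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr);

		if (sp >= irq_stk - THREAD_SIZE && sp < irq_stk)
			return "irq";
	}
#endif
	if (sp >= ovf_stk - OVERFLOW_STACK_SIZE && sp < ovf_stk)
		return "overflow";
	return "unknown";
}
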
+
+#ifndef CONFIG_ARM_LPAE
+/*
+ * Normally, we rely on the logic in do_translation_fault() to update stale PMD
+ * entries covering the vmalloc space in a task's page tables when it first
+ * accesses the region in question. Unfortunately, this is not sufficient when
+ * the task stack resides in the vmalloc region, as do_translation_fault() is a
+ * C function that needs a stack to run.
+ *
+ * So we need to ensure that these PMD entries are up to date *before* the MM
+ * switch. As we already have some logic in the MM switch path that takes care
+ * of this, let's trigger it by bumping the counter every time the core vmalloc
+ * code modifies a PMD entry in the vmalloc region. Use release semantics on
+ * the store so that other CPUs observing the counter's new value are
+ * guaranteed to see the updated page table entries as well.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+{
+ if (start < VMALLOC_END && end > VMALLOC_START)
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
+}
+#endif
+#endif
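
Note: the MM-switch logic that consumes vmalloc_seq (referred to in the comment above) works roughly like the sketch below; the name and details are my approximation of the existing ARM context-switch code, not something this patch adds.

static inline void example_check_vmalloc_seq(struct mm_struct *mm)
{
	/* acquire pairs with the release in arch_sync_kernel_mappings() */
	unsigned int seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);

	if (unlikely(atomic_read(&mm->context.vmalloc_seq) != seq)) {
		/* ... copy the vmalloc-area PMDs from init_mm into mm ... */
		atomic_set(&mm->context.vmalloc_seq, seq);
	}
}
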