Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Makefile                                |  2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h     |  4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq.dtsi          |  3
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h                |  6
-rw-r--r--  arch/arm64/include/asm/cpufeature.h                |  7
-rw-r--r--  arch/arm64/include/asm/daifflags.h                 |  2
-rw-r--r--  arch/arm64/include/asm/efi.h                       |  6
-rw-r--r--  arch/arm64/include/asm/elf.h                       |  2
-rw-r--r--  arch/arm64/include/asm/memory.h                    | 10
-rw-r--r--  arch/arm64/include/asm/pgtable.h                   |  5
-rw-r--r--  arch/arm64/include/asm/processor.h                 | 14
-rw-r--r--  arch/arm64/include/asm/ptrace.h                    |  2
-rw-r--r--  arch/arm64/include/asm/stacktrace.h                | 78
-rw-r--r--  arch/arm64/include/asm/vdso/compat_gettimeofday.h  | 40
-rw-r--r--  arch/arm64/include/uapi/asm/bpf_perf_event.h       |  2
-rw-r--r--  arch/arm64/kernel/cpufeature.c                     |  8
-rw-r--r--  arch/arm64/kernel/debug-monitors.c                 | 14
-rw-r--r--  arch/arm64/kernel/entry.S                          | 22
-rw-r--r--  arch/arm64/kernel/fpsimd.c                         | 29
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c                  | 11
-rw-r--r--  arch/arm64/kernel/module.c                         |  4
-rw-r--r--  arch/arm64/kernel/perf_callchain.c                 |  7
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c                 | 40
-rw-r--r--  arch/arm64/kernel/process.c                        | 36
-rw-r--r--  arch/arm64/kernel/return_address.c                 | 12
-rw-r--r--  arch/arm64/kernel/smp.c                            |  2
-rw-r--r--  arch/arm64/kernel/stacktrace.c                     | 62
-rw-r--r--  arch/arm64/kernel/time.c                           |  7
-rw-r--r--  arch/arm64/kernel/traps.c                          | 13
-rw-r--r--  arch/arm64/kernel/vdso/Makefile                    | 13
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile                  | 14
-rw-r--r--  arch/arm64/mm/fault.c                              | 57
32 files changed, 367 insertions, 167 deletions
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bb1f1dbb34e8..61de992bbea3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
ifeq ($(CONFIG_CC_IS_CLANG), y)
$(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
- else ifeq ($(CROSS_COMPILE_COMPAT),)
+ else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
$(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
$(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index e25f7fcd7997..cffa8991880d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -462,7 +462,7 @@
#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
@@ -472,7 +472,7 @@
#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index d09b808eff87..52aae341d0da 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -715,8 +715,7 @@
sai2: sai@308b0000 {
#sound-dai-cells = <0>;
- compatible = "fsl,imx8mq-sai",
- "fsl,imx6sx-sai";
+ compatible = "fsl,imx8mq-sai";
reg = <0x308b0000 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 79155a8cfe7c..89e4c8b79349 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
GIC_PRIO_PSR_I_SET));
BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+ /*
+ * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
+ * and non-secure PMR accesses are not subject to the shifts that
+ * are applied to IRQ priorities
+ */
+ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
gic_write_pmr(GIC_PRIO_IRQOFF);
}
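
The new BUILD_BUG_ON only holds because GIC_PRIO_IRQON is raised from 0xc0 to 0xe0 in the ptrace.h hunk below. A small userspace model of the check, assuming GICD_INT_DEF_PRI = 0xa0 as in the generic GIC headers (an illustration, not kernel code):

#include <assert.h>
#include <stdio.h>

#define GICD_INT_DEF_PRI	0xa0
#define GIC_PRIO_IRQON		0xe0	/* new value from ptrace.h below */

/*
 * When SCR_EL3.FIQ == 0, an IRQ priority programmed from the non-secure
 * world is presented to the redistributor right-shifted by one with bit
 * 7 set, while PMR is not shifted. PMR must mask the shifted view too.
 */
static unsigned int ns_view(unsigned int prio)
{
	return 0x80 | (prio >> 1);
}

int main(void)
{
	assert(ns_view(GICD_INT_DEF_PRI) < GIC_PRIO_IRQON);	/* 0xd0 < 0xe0 */
	assert(!(ns_view(GICD_INT_DEF_PRI) < 0xc0));	/* old 0xc0 would fail */
	printf("shifted view of default priority: %#x\n",
	       ns_view(GICD_INT_DEF_PRI));
	return 0;
}
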
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 407e2bf23676..c96ffa4722d3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,9 +35,10 @@
*/
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 987926ed535e..063c964af705 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,6 +13,8 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e79ce9c3f5c..76a144702586 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3c7037c6ba9b..b618017205a3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
({ \
set_thread_flag(TIF_32BIT); \
})
-#ifdef CONFIG_GENERIC_COMPAT_VDSO
+#ifdef CONFIG_COMPAT_VDSO
#define COMPAT_ARCH_DLINFO \
do { \
/* \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b7ba75809751..fb04f10a78ab 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -210,7 +210,11 @@ extern u64 vabits_user;
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
-#define __tag_set(addr, tag) (addr)
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+ return addr;
+}
+
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
- unsigned long __addr_tag = \
- __tag_set(__addr, page_kasan_tag(page)); \
+ const void *__addr_tag = \
+ __tag_set((void *)__addr, page_kasan_tag(page)); \
((void *)__addr_tag); \
})
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 87a4b2ddc1a1..5fdcfe237338 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
/*
* Huge pte definitions.
*/
-#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
/*
@@ -448,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = PSR_MODE_EL0t;
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(regs);
regs->sp = sp;
}
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
#endif
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_AA32_SSBS_BIT;
+ set_compat_ssbs_bit(regs);
regs->compat_sp = sp;
}
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b1dd039023ef..1dcf63a9ac1f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,7 @@
 * in the priority mask, it indicates that PSR.I should be set and
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
-#define GIC_PRIO_IRQON 0xc0
+#define GIC_PRIO_IRQON 0xe0
#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
#define GIC_PRIO_PSR_I_SET (1 << 4)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index df45af931459..4d9b1f48dc39 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,19 +8,12 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/types.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>
-struct stackframe {
- unsigned long fp;
- unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- int graph;
-#endif
-};
-
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
@@ -28,6 +21,7 @@ enum stack_type {
STACK_TYPE_OVERFLOW,
STACK_TYPE_SDEI_NORMAL,
STACK_TYPE_SDEI_CRITICAL,
+ __NR_STACK_TYPES
};
struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
enum stack_type type;
};
+/*
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
+ *
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc:          The lr value in the frame record (or the real lr)
+ *
+ * @stacks_done: Stacks which have been entirely unwound, and which it is
+ *               no longer valid to unwind to.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
+ * replacement lr value in the ftrace graph stack.
+ */
+struct stackframe {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph;
+#endif
+};
+
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
@@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp,
return true;
}
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
- struct stack_info *info)
+static inline bool on_task_stack(const struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
@@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp,
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
-static inline bool on_accessible_stack(struct task_struct *tsk,
- unsigned long sp,
- struct stack_info *info)
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
+ if (info)
+ info->type = STACK_TYPE_UNKNOWN;
+
if (on_task_stack(tsk, sp, info))
return true;
if (tsk != current || preemptible())
@@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk,
return false;
}
+static inline void start_backtrace(struct stackframe *frame,
+ unsigned long fp, unsigned long pc)
+{
+ frame->fp = fp;
+ frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame->graph = 0;
+#endif
+
+ /*
+ * Prime the first unwind.
+ *
+ * In unwind_frame() we'll check that the FP points to a valid stack,
+ * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+ * treated as a transition to whichever stack that happens to be. The
+ * prev_fp value won't be used, but we set it to 0 such that it is
+ * definitely not an accessible stack address.
+ */
+ bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+ frame->prev_fp = 0;
+ frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
#endif /* __ASM_STACKTRACE_H */
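
The termination rules are easiest to see in isolation. Below is a simplified userspace model of the logic that unwind_frame() applies in kernel/stacktrace.c further down (stack types and frame values are invented for the demo; the real unwinder also checks that fp lies on an accessible stack):

#include <stdbool.h>
#include <stdio.h>

enum stack_type { STACK_TASK, STACK_IRQ, NR_STACKS };

struct frame { unsigned long fp; int type; };

struct unwinder {
	unsigned long prev_fp;
	int prev_type;			/* -1 plays STACK_TYPE_UNKNOWN */
	bool done[NR_STACKS];
};

static bool unwind_ok(struct unwinder *u, const struct frame *f)
{
	if (u->done[f->type])
		return false;		/* stack was already left: cycle */
	if (f->type == u->prev_type && f->fp <= u->prev_fp)
		return false;		/* records must strictly ascend */
	if (f->type != u->prev_type && u->prev_type >= 0)
		u->done[u->prev_type] = true;	/* transitions are one-way */
	u->prev_fp = f->fp;
	u->prev_type = f->type;
	return true;
}

int main(void)
{
	struct unwinder u = { .prev_fp = 0, .prev_type = -1 };
	struct frame trace[] = {
		{ 0x1000, STACK_IRQ },  { 0x1040, STACK_IRQ },
		{ 0x8000, STACK_TASK }, { 0x1080, STACK_IRQ },	/* bogus */
	};

	for (unsigned int i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		printf("frame %u: %s\n", i,
		       unwind_ok(&u, &trace[i]) ? "ok" : "terminate");
	return 0;
}
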
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index f4812777f5c5..c50ee1b7d5cd 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -16,6 +16,8 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#define VDSO_HAS_32BIT_FALLBACK 1
+
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -52,6 +54,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
register struct __kernel_timespec *ts asm("r1") = _ts;
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_getres;
+
+ /* The checks below are required for ABI consistency with arm */
+ if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
+ return -EINVAL;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
u64 res;
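
A minimal sketch of what VDSO_HAS_32BIT_FALLBACK buys the generic vDSO code, with simplified stand-in types (an assumption-laden illustration, not the lib/vdso source): with a native 32-bit fallback the compat path keeps the exact compat syscall ABI, instead of calling the 64-bit fallback and narrowing the result.

#include <stdint.h>

struct old_timespec32    { int32_t tv_sec; int32_t tv_nsec; };
struct kernel_timespec64 { int64_t tv_sec; int64_t tv_nsec; };

/* Stand-ins for the fallbacks defined in the header above. */
extern long clock_gettime_fallback(int clkid, struct kernel_timespec64 *ts);
extern long clock_gettime32_fallback(int clkid, struct old_timespec32 *ts);

long vdso_clock_gettime32(int clkid, struct old_timespec32 *res)
{
#ifdef VDSO_HAS_32BIT_FALLBACK
	return clock_gettime32_fallback(clkid, res);	/* exact compat ABI */
#else
	struct kernel_timespec64 ts;
	long ret = clock_gettime_fallback(clkid, &ts);

	if (ret == 0) {
		res->tv_sec  = (int32_t)ts.tv_sec;	/* truncates after 2038 */
		res->tv_nsec = (int32_t)ts.tv_nsec;
	}
	return ret;
#endif
}
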
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
index b551b741653d..5e1e648aeec4 100644
--- a/arch/arm64/include/uapi/asm/bpf_perf_event.h
+++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f29f36a65175..d19d14ba9ae4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
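
The new rule encodes that, for fields like CWG and ERG, 0 means "not reported" and readers must assume the architectural maximum, so 0 has to win over any concrete value. A userspace model mirroring the switch above:

#include <assert.h>

enum ftr_type { FTR_LOWER_SAFE, FTR_HIGHER_SAFE, FTR_HIGHER_OR_ZERO_SAFE };

static long safe_value(enum ftr_type type, long new, long cur)
{
	switch (type) {
	case FTR_LOWER_SAFE:
		return new < cur ? new : cur;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			return 0;	/* 0 ("unknown") is the biggest */
		/* fall through */
	case FTR_HIGHER_SAFE:
		return new > cur ? new : cur;
	}
	return cur;
}

int main(void)
{
	/* One CPU reports CWG = 4, another reports 0 (unknown). */
	assert(safe_value(FTR_HIGHER_OR_ZERO_SAFE, 0, 4) == 0);
	assert(safe_value(FTR_HIGHER_OR_ZERO_SAFE, 4, 3) == 4);
	assert(safe_value(FTR_HIGHER_SAFE, 0, 4) == 4);
	return 0;
}
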
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f8719bd30850..48222a4760c2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
- rcu_read_lock();
-
+ /*
+ * Since the single-step exception disables interrupts, this function is
+ * not preemptible, so the RCU-protected list can safely be traversed here.
+ */
list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- rcu_read_unlock();
-
return retval;
}
NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
- rcu_read_lock();
+ /*
+ * Since the brk exception disables interrupts, this function is
+ * not preemptible, so the RCU-protected list can safely be traversed here.
+ */
list_for_each_entry_rcu(hook, list, node) {
unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
}
- rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc4592da3e..320a30dbe35e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -586,10 +586,8 @@ el1_sync:
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el1_sp_pc
+ b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
bl do_mem_abort
kernel_exit 1
-el1_sp_pc:
+el1_pc:
/*
- * Stack or PC alignment exception handling
+ * PC alignment exception handling. We don't handle SP alignment faults,
+ * since we will have hit a recursive exception when trying to push the
+ * initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el0_sp_pc
+ b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
+el0_sp:
+ ldr x26, [sp, #S_SP]
+ b el0_sp_pc
+el0_pc:
+ mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
- mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index eec4776ae5f0..37d3912cfe06 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+ unsigned int vq)
+{
+ unsigned int i;
+ __uint128_t *p;
+
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
+}
+
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
- __uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i) {
- p = (__uint128_t *)ZREG(sst, vq, i);
- *p = arm64_cpu_to_le128(fst->vregs[i]);
- }
+ __fpsimd_to_sve(sst, fst, vq);
}
/*
@@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i) {
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
}
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
- __uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
vq = sve_vq_from_vl(task->thread.sve_vl);
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
- for (i = 0; i < 32; ++i) {
- p = (__uint128_t *)ZREG(sst, vq, i);
- *p = arm64_cpu_to_le128(fst->vregs[i]);
- }
+ __fpsimd_to_sve(sst, fst, vq);
}
int sve_set_vector_length(struct task_struct *task,
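
The factored-out __fpsimd_to_sve() copies each 128-bit V register into the low bits of its Z register slot. A simplified userspace model of that layout, assuming Z registers packed contiguously at vq * 16 bytes each as in the SVE signal frame (the kernel version additionally byte-swaps via arm64_cpu_to_le128() on big-endian):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define SVE_NUM_ZREGS	32

/* Byte offset of Z register n for a vector length of vq quadwords. */
static size_t zreg_off(unsigned int vq, unsigned int n)
{
	return (size_t)n * vq * 16;
}

static void fpsimd_to_sve(uint8_t *sst, uint8_t (*vregs)[16], unsigned int vq)
{
	for (unsigned int i = 0; i < SVE_NUM_ZREGS; i++)
		memcpy(sst + zreg_off(vq, i), vregs[i], 16);
}

int main(void)
{
	unsigned int vq = 2;	/* 256-bit vectors */
	uint8_t vregs[SVE_NUM_ZREGS][16] = { { 0xaa }, { 0xbb } };
	uint8_t sst[SVE_NUM_ZREGS * 2 * 16] = { 0 };

	fpsimd_to_sve(sst, vregs, vq);
	assert(sst[zreg_off(vq, 1)] == 0xbb);	/* low byte of Z1 == V1 */
	return 0;
}
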
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index dceb84520948..38ee1514cd9c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+
+ /* Fallthrough */
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
+
+ /* Fallthrough */
default:
return -EINVAL;
}
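
The reordered switch changes which unaligned watchpoints are accepted: offsets 1 and 2 take halfword or byte lengths, and offset 3 (previously rejected) now takes a byte. A userspace model of the resulting predicate (the 8-byte offset window and length encodings are simplified assumptions):

#include <assert.h>
#include <stdbool.h>

#define LEN_1	1
#define LEN_2	2
#define LEN_8	8

static bool watchpoint_ok(unsigned long addr, unsigned int len)
{
	switch (addr & 7) {
	case 0:
		return true;		/* aligned: any supported length */
	case 1:
	case 2:
		if (len == LEN_2)	/* halfword at offset 1 or 2 */
			return true;
		/* fall through */
	case 3:
		return len == LEN_1;	/* single byte at offset 1-3 */
	default:
		return false;
	}
}

int main(void)
{
	assert(watchpoint_ok(0x1000, LEN_8));
	assert(watchpoint_ok(0x1002, LEN_1));	/* newly accepted */
	assert(watchpoint_ok(0x1003, LEN_1));	/* newly accepted */
	assert(!watchpoint_ok(0x1003, LEN_2));
	assert(!watchpoint_ok(0x1005, LEN_1));
	return 0;
}
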
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 46e643e30708..03ff15bffbb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 9d63514b9836..b0e03e052dd1 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
return;
}
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, callchain_trace, entry);
}
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bd5dfffca272..c4452827419b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -21,6 +21,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
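
The save/restore now round-trips all four DAIF bits, so the unconditional PSTATE.D clear for the single-step window is undone on restore instead of leaking into the resumed context (which is what the deleted spsr_set_debug_flag() dance tried to manage). A userspace model with the arm64 PSTATE bit positions:

#include <assert.h>

#define PSR_F_BIT	0x00000040UL
#define PSR_I_BIT	0x00000080UL
#define PSR_A_BIT	0x00000100UL
#define PSR_D_BIT	0x00000200UL
#define DAIF_MASK	(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

static unsigned long save_irqflag(unsigned long *pstate)
{
	unsigned long saved = *pstate & DAIF_MASK;

	*pstate |= PSR_I_BIT;	/* no interrupts while single-stepping */
	*pstate &= ~PSR_D_BIT;	/* unmask the single-step debug exception */
	return saved;
}

static void restore_irqflag(unsigned long *pstate, unsigned long saved)
{
	*pstate &= ~DAIF_MASK;
	*pstate |= saved;
}

int main(void)
{
	/* e.g. a kprobe hit while already in a debug exception: D set */
	unsigned long pstate = PSR_D_BIT | PSR_A_BIT;
	unsigned long saved = save_irqflag(&pstate);

	assert(!(pstate & PSR_D_BIT) && (pstate & PSR_I_BIT));
	restore_irqflag(&pstate, saved);
	assert(pstate == (PSR_D_BIT | PSR_A_BIT));	/* nothing leaked */
	return 0;
}
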
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6a869d9f304f..f674f28df663 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
childregs->pstate |= PSR_UAO_BIT;
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- childregs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(childregs);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
@@ -443,6 +443,32 @@ void uao_thread_switch(struct task_struct *next)
}
/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ struct pt_regs *regs = task_pt_regs(next);
+
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /* If the mitigation is enabled, then we leave SSBS clear. */
+ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+ test_tsk_thread_flag(next, TIF_SSBD))
+ return;
+
+ if (compat_user_mode(regs))
+ set_compat_ssbs_bit(regs);
+ else if (user_mode(regs))
+ set_ssbs_bit(regs);
+}
+
+/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
*
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p)
if (!stack_page)
return 0;
- frame.fp = thread_saved_fp(p);
- frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
+ start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
do {
if (unwind_frame(p, &frame))
goto out;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index b21cba90f82d..a5e8b3b9d798 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -38,12 +40,9 @@ void *return_address(unsigned int level)
data.level = level + 2;
data.addr = NULL;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)return_address);
walk_stackframe(current, &frame, save_return_addr, &data);
if (!data.level)
@@ -52,3 +51,4 @@ void *return_address(unsigned int level)
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ea90d3bd9253..018a33e01b0e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: died during early boot\n", cpu);
break;
}
- /* Fall through */
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 62d395151abe..a336cb124320 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -29,9 +30,18 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long fp = frame->fp;
+ struct stack_info info;
if (fp & 0xf)
return -EINVAL;
@@ -39,11 +49,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, NULL))
+ if (!on_accessible_stack(tsk, fp, &info))
+ return -EINVAL;
+
+ if (test_bit(info.type, frame->stacks_done))
return -EINVAL;
+ /*
+ * As stacks grow downward, any valid record on the same stack must be
+ * at a strictly higher address than the prior record.
+ *
+ * Stacks can nest in several valid orders, e.g.
+ *
+ * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+ * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+ *
+ * ... but the nesting itself is strict. Once we transition from one
+ * stack to another, it's never valid to unwind back to that first
+ * stack.
+ */
+ if (info.type == frame->prev_type) {
+ if (fp <= frame->prev_fp)
+ return -EINVAL;
+ } else {
+ set_bit(frame->prev_type, frame->stacks_done);
+ }
+
+ /*
+ * Record this frame record's values and location. The prev_fp and
+ * prev_type are only meaningful to the next unwind_frame() invocation.
+ */
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ frame->prev_fp = fp;
+ frame->prev_type = info.type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
@@ -73,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -87,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -122,12 +163,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
data.skip = trace->skip;
data.no_sched_functions = 0;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +182,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
data.no_sched_functions = nosched;
if (tsk != current) {
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame, thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
} else {
/* We don't want this function nor the caller */
data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)__save_stack_trace);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
walk_stackframe(tsk, &frame, save_trace, &data);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 9f25aedeac9d..0b2946414dc9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
if (!in_lock_functions(regs->pc))
return regs->pc;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+
do {
int ret = unwind_frame(NULL, &frame);
if (ret < 0)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8c03456dade6..d3313797cca9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
return;
if (tsk == current) {
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)dump_backtrace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)dump_backtrace);
} else {
/*
* task blocked in __switch_to
*/
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
printk("Call trace:\n");
do {
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 4ab863045188..dd2514bb1511 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
-ifeq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
-else
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y)
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
# Clang versions less than 8 do not support -mcmodel=tiny
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,ld)
- $(call if_changed,vdso_check)
+ $(call if_changed,vdsold_and_vdso_check)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Actual build commands
-quiet_cmd_vdsocc = VDSOCC $@
- cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 60a4c6239712..1fba0776ed40 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
- $(call if_changed,vdso_check)
+ $(call if_changed,vdsold_and_vdso_check)
# Compilation rules for the vDSO sources
$(c-obj-vdso): %.o: %.c FORCE
@@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold_and_vdso_check = LD32 $@
+ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
+quiet_cmd_vdsold = LD32 $@
cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-quiet_cmd_vdsocc = VDSOC $@
+quiet_cmd_vdsocc = CC32 $@
cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
-quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@
+quiet_cmd_vdsocc_gettimeofday = CC32 $@
cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = AS32 $@
cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
quiet_cmd_vdsomunge = MUNGE $@
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9568c116ac7f..cfd65b63f36f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+ */
+ if (interrupts_enabled(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ } else {
+ /*
+ * We might have interrupted pretty much anything. In
+ * fact, if we're a debug exception, we can even interrupt
+		 * NMI processing. We don't want this code to make in_nmi()
+		 * return true, but we need to notify RCU.
+ */
+ rcu_nmi_enter();
+ }
+
+ preempt_disable();
+
+ /* This code is a bit fragile. Test it. */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+ preempt_enable_no_resched();
+
+ if (!user_mode(regs))
+ rcu_nmi_exit();
+
+ if (interrupts_enabled(regs))
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (cortex_a76_erratum_1463225_debug_handler(regs))
return;
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
+ debug_exception_enter(regs);
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
inf->sig, inf->code, (void __user *)pc, esr);
}
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
+ debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);