Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/acpi.c               |  35
-rw-r--r--  arch/arm64/kernel/cpufeature.c         |  13
-rw-r--r--  arch/arm64/kernel/cpuinfo.c            |   3
-rw-r--r--  arch/arm64/kernel/entry-common.c       |   2
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S       |   2
-rw-r--r--  arch/arm64/kernel/entry.S              |   6
-rw-r--r--  arch/arm64/kernel/fpsimd.c             |  69
-rw-r--r--  arch/arm64/kernel/ftrace.c             |   6
-rw-r--r--  arch/arm64/kernel/hibernate.c          |   4
-rw-r--r--  arch/arm64/kernel/machine_kexec.c      |   8
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c |   1
-rw-r--r--  arch/arm64/kernel/perf_callchain.c     |  15
-rw-r--r--  arch/arm64/kernel/perf_event.c         | 252
-rw-r--r--  arch/arm64/kernel/process.c            |  84
-rw-r--r--  arch/arm64/kernel/ptrace.c             |   8
-rw-r--r--  arch/arm64/kernel/return_address.c     |   8
-rw-r--r--  arch/arm64/kernel/setup.c              |  13
-rw-r--r--  arch/arm64/kernel/signal.c             |   2
-rw-r--r--  arch/arm64/kernel/stacktrace.c         |  78
-rw-r--r--  arch/arm64/kernel/syscall.c            |   4
-rw-r--r--  arch/arm64/kernel/time.c               |  25
-rw-r--r--  arch/arm64/kernel/traps.c              |   2
-rw-r--r--  arch/arm64/kernel/vdso/Makefile        |   1
23 files changed, 364 insertions(+), 277 deletions(-)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index f3851724fe35..e4dea8db6924 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -22,6 +22,7 @@
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>
@@ -62,29 +63,22 @@ static int __init parse_acpi(char *arg)
}
early_param("acpi", parse_acpi);
-static int __init dt_scan_depth1_nodes(unsigned long node,
- const char *uname, int depth,
- void *data)
+static bool __init dt_is_stub(void)
{
- /*
- * Ignore anything not directly under the root node; we'll
- * catch its parent instead.
- */
- if (depth != 1)
- return 0;
+ int node;
- if (strcmp(uname, "chosen") == 0)
- return 0;
+ fdt_for_each_subnode(node, initial_boot_params, 0) {
+ const char *name = fdt_get_name(initial_boot_params, node, NULL);
+ if (strcmp(name, "chosen") == 0)
+ continue;
+ if (strcmp(name, "hypervisor") == 0 &&
+ of_flat_dt_is_compatible(node, "xen,xen"))
+ continue;
- if (strcmp(uname, "hypervisor") == 0 &&
- of_flat_dt_is_compatible(node, "xen,xen"))
- return 0;
+ return false;
+ }
- /*
- * This node at depth 1 is neither a chosen node nor a xen node,
- * which we do not expect.
- */
- return 1;
+ return true;
}
/*
@@ -205,8 +199,7 @@ void __init acpi_boot_table_init(void)
* and ACPI has not been [force] enabled (acpi=on|force)
*/
if (param_acpi_off ||
- (!param_acpi_on && !param_acpi_force &&
- of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+ (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
goto done;
/*
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f3e677d88f1..a46ab3b1c4d5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,6 +225,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
@@ -325,6 +330,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
@@ -637,6 +643,7 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
&id_aa64isar1_override),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -933,6 +940,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
@@ -1151,6 +1159,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
+ info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
/*
* Differing PARange support is fine as long as all peripherals and
@@ -1272,6 +1282,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
read_sysreg_case(SYS_CNTFRQ_EL0);
read_sysreg_case(SYS_CTR_EL0);
@@ -2476,6 +2487,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
{},
};
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 6e27b759056a..591c18a889a5 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -95,6 +95,8 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_BTI] = "bti",
[KERNEL_HWCAP_MTE] = "mte",
[KERNEL_HWCAP_ECV] = "ecv",
+ [KERNEL_HWCAP_AFP] = "afp",
+ [KERNEL_HWCAP_RPRES] = "rpres",
};
#ifdef CONFIG_COMPAT
@@ -391,6 +393,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
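The cpufeature.c and cpuinfo.c hunks above expose the new AFP and RPRES features to userspace as hwcaps. A minimal probe sketch of how these would be detected via the ELF auxiliary vector, assuming the companion uapi <asm/hwcap.h> change (not in this diff) defines HWCAP2_AFP and HWCAP2_RPRES:

/*
 * Hypothetical userspace probe for the "afp" and "rpres" hwcaps added
 * above; HWCAP2_AFP/HWCAP2_RPRES are assumed to come from the matching
 * uapi header update, which is outside this diff.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("afp:   %s\n", (hwcap2 & HWCAP2_AFP)   ? "yes" : "no");
	printf("rpres: %s\n", (hwcap2 & HWCAP2_RPRES) ? "yes" : "no");
	return 0;
}

The same strings ("afp", "rpres") also show up in the Features line of /proc/cpuinfo via the hwcap_str[] additions above.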
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index f7408edf8571..ef7fcefb96bd 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -129,7 +129,7 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
local_daif_mask();
- flags = READ_ONCE(current_thread_info()->flags);
+ flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
}
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b3e4f9a088b1..e535480a4069 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -77,11 +77,13 @@
.endm
SYM_CODE_START(ftrace_regs_caller)
+ bti c
ftrace_regs_entry 1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
SYM_CODE_START(ftrace_caller)
+ bti c
ftrace_regs_entry 0
b ftrace_common
SYM_CODE_END(ftrace_caller)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2f69ae43941d..772ec2ecf488 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -966,8 +966,10 @@ SYM_CODE_START(__sdei_asm_handler)
mov sp, x1
mov x1, x0 // address to complete_and_resume
- /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
- cmp x0, #1
+ /* x0 = (x0 <= SDEI_EV_FAILED) ?
+ * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME
+ */
+ cmp x0, #SDEI_EV_FAILED
mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
csel x0, x2, x3, ls
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index fa244c426f61..f2307d6631eb 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
+#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
@@ -406,12 +407,13 @@ static unsigned int find_supported_vector_length(enum vec_type type,
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
-static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+static int vec_proc_do_default_vl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct vl_info *info = &vl_info[ARM64_VEC_SVE];
+ struct vl_info *info = table->extra1;
+ enum vec_type type = info->type;
int ret;
- int vl = get_sve_default_vl();
+ int vl = get_default_vl(type);
struct ctl_table tmp_table = {
.data = &vl,
.maxlen = sizeof(vl),
@@ -428,7 +430,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
if (!sve_vl_valid(vl))
return -EINVAL;
- set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, vl));
+ set_default_vl(type, find_supported_vector_length(type, vl));
return 0;
}
@@ -436,7 +438,8 @@ static struct ctl_table sve_default_vl_table[] = {
{
.procname = "sve_default_vector_length",
.mode = 0644,
- .proc_handler = sve_proc_do_default_vl,
+ .proc_handler = vec_proc_do_default_vl,
+ .extra1 = &vl_info[ARM64_VEC_SVE],
},
{ }
};
@@ -629,7 +632,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
__fpsimd_to_sve(sst, fst, vq);
}
-int sve_set_vector_length(struct task_struct *task,
+int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags)
{
if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
@@ -640,33 +643,35 @@ int sve_set_vector_length(struct task_struct *task,
return -EINVAL;
/*
- * Clamp to the maximum vector length that VL-agnostic SVE code can
- * work with. A flag may be assigned in the future to allow setting
- * of larger vector lengths without confusing older software.
+ * Clamp to the maximum vector length that VL-agnostic code
+ * can work with. A flag may be assigned in the future to
+ * allow setting of larger vector lengths without confusing
+ * older software.
*/
- if (vl > SVE_VL_ARCH_MAX)
- vl = SVE_VL_ARCH_MAX;
+ if (vl > VL_ARCH_MAX)
+ vl = VL_ARCH_MAX;
- vl = find_supported_vector_length(ARM64_VEC_SVE, vl);
+ vl = find_supported_vector_length(type, vl);
if (flags & (PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC))
- task_set_sve_vl_onexec(task, vl);
+ task_set_vl_onexec(task, type, vl);
else
/* Reset VL to system default on next exec: */
- task_set_sve_vl_onexec(task, 0);
+ task_set_vl_onexec(task, type, 0);
/* Only actually set the VL if not deferred: */
if (flags & PR_SVE_SET_VL_ONEXEC)
goto out;
- if (vl == task_get_sve_vl(task))
+ if (vl == task_get_vl(task, type))
goto out;
/*
* To ensure the FPSIMD bits of the SVE vector registers are preserved,
* write any live register state back to task_struct, and convert to a
- * non-SVE thread.
+ * regular FPSIMD thread. Since the vector length can only be changed
+ * with a syscall we can't be in streaming mode while reconfiguring.
*/
if (task == current) {
get_cpu_fpsimd_context();
@@ -687,10 +692,10 @@ int sve_set_vector_length(struct task_struct *task,
*/
sve_free(task);
- task_set_sve_vl(task, vl);
+ task_set_vl(task, type, vl);
out:
- update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
+ update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
flags & PR_SVE_VL_INHERIT);
return 0;
@@ -698,20 +703,21 @@ out:
/*
* Encode the current vector length and flags for return.
- * This is only required for prctl(): ptrace has separate fields
+ * This is only required for prctl(): ptrace has separate fields.
+ * SVE and SME use the same bits for _ONEXEC and _INHERIT.
*
- * flags are as for sve_set_vector_length().
+ * flags are as for vec_set_vector_length().
*/
-static int sve_prctl_status(unsigned long flags)
+static int vec_prctl_status(enum vec_type type, unsigned long flags)
{
int ret;
if (flags & PR_SVE_SET_VL_ONEXEC)
- ret = task_get_sve_vl_onexec(current);
+ ret = task_get_vl_onexec(current, type);
else
- ret = task_get_sve_vl(current);
+ ret = task_get_vl(current, type);
- if (test_thread_flag(TIF_SVE_VL_INHERIT))
+ if (test_thread_flag(vec_vl_inherit_flag(type)))
ret |= PR_SVE_VL_INHERIT;
return ret;
@@ -729,11 +735,11 @@ int sve_set_current_vl(unsigned long arg)
if (!system_supports_sve() || is_compat_task())
return -EINVAL;
- ret = sve_set_vector_length(current, vl, flags);
+ ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags);
if (ret)
return ret;
- return sve_prctl_status(flags);
+ return vec_prctl_status(ARM64_VEC_SVE, flags);
}
/* PR_SVE_GET_VL */
@@ -742,7 +748,7 @@ int sve_get_current_vl(void)
if (!system_supports_sve() || is_compat_task())
return -EINVAL;
- return sve_prctl_status(0);
+ return vec_prctl_status(ARM64_VEC_SVE, 0);
}
static void vec_probe_vqs(struct vl_info *info,
@@ -1107,7 +1113,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type)
vl = get_default_vl(type);
if (WARN_ON(!sve_vl_valid(vl)))
- vl = SVE_VL_MIN;
+ vl = vl_info[type].min_vl;
supported_vl = find_supported_vector_length(type, vl);
if (WARN_ON(supported_vl != vl))
@@ -1213,7 +1219,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
/*
* Load the userland FPSIMD state of 'current' from memory, but only if the
* FPSIMD state already held in the registers is /not/ the most recent FPSIMD
- * state of 'current'
+ * state of 'current'. This is called when we are preparing to return to
+ * userspace to ensure that userspace sees a good register state.
*/
void fpsimd_restore_current_state(void)
{
@@ -1244,7 +1251,9 @@ void fpsimd_restore_current_state(void)
/*
* Load an updated userland FPSIMD state for 'current' from memory and set the
* flag that indicates that the FPSIMD register contents are the most recent
- * FPSIMD state of 'current'
+ * FPSIMD state of 'current'. This is used by the signal code to restore the
+ * register state when returning from a signal handler in FPSIMD only cases,
+ * any SVE context will be discarded.
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
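The fpsimd.c changes above generalise the SVE vector-length plumbing into vec_set_vector_length()/vec_prctl_status(), but the userspace entry points remain sve_set_current_vl() and sve_get_current_vl(), reached through prctl(). A minimal sketch of that interface, assuming <sys/prctl.h> provides the PR_SVE_* definitions on an SVE-capable kernel:

/*
 * Minimal sketch of the PR_SVE_SET_VL/PR_SVE_GET_VL interface served by
 * the functions above; error handling kept to the essentials.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int ret;

	/* Ask for a 32-byte (256-bit) vector length for this task. */
	ret = prctl(PR_SVE_SET_VL, 32);
	if (ret < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}

	/* The low 16 bits of the return value encode the VL actually granted. */
	ret = prctl(PR_SVE_GET_VL);
	printf("current SVE VL: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
	return 0;
}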
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index fc62dfe73f93..4506c4a90ac1 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
* on the way back to parent. For this purpose, this function is called
* in _mcount() or ftrace_caller() to replace return address (*parent) on
* the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
*/
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
*/
old = *parent;
- if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+ if (!function_graph_enter(old, self_addr, frame_pointer,
+ (void *)frame_pointer)) {
*parent = return_hooker;
+ }
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 2758f75d6809..6328308be272 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -7,10 +7,6 @@
* Ubuntu project, hibernation support for mach-dove
* Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
* Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
- * https://lkml.org/lkml/2010/6/18/4
- * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
- * https://patchwork.kernel.org/patch/96442/
- *
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*/
#define pr_fmt(x) "hibernate: " x
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 1038494135c8..e16b248699d5 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -104,13 +104,15 @@ static void *kexec_page_alloc(void *arg)
{
struct kimage *kimage = (struct kimage *)arg;
struct page *page = kimage_alloc_control_pages(kimage, 0);
+ void *vaddr = NULL;
if (!page)
return NULL;
- memset(page_address(page), 0, PAGE_SIZE);
+ vaddr = page_address(page);
+ memset(vaddr, 0, PAGE_SIZE);
- return page_address(page);
+ return vaddr;
}
int machine_kexec_post_load(struct kimage *kimage)
@@ -147,7 +149,7 @@ int machine_kexec_post_load(struct kimage *kimage)
if (rc)
return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
- kimage->arch.zero_page = __pa(empty_zero_page);
+ kimage->arch.zero_page = __pa_symbol(empty_zero_page);
reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 63634b4d72c1..59c648d51848 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image,
initrd_len, cmdline, 0);
if (!dtb) {
pr_err("Preparing for new dtb failed\n");
+ ret = -EINVAL;
goto out_err;
}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 4a72c2727309..e9b7d99f4e3a 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -5,10 +5,10 @@
* Copyright (C) 2015 ARM Limited
*/
#include <linux/perf_event.h>
+#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <asm/pointer_auth.h>
-#include <asm/stacktrace.h>
struct frame_tail {
struct frame_tail __user *fp;
@@ -132,30 +132,21 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
}
}
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
static bool callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, pc);
- return true;
+ return perf_callchain_store(entry, pc) == 0;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- struct stackframe frame;
-
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
- start_backtrace(&frame, regs->regs[29], regs->pc);
- walk_stackframe(current, &frame, callchain_trace, entry);
+ arch_stack_walk(callchain_trace, entry, current, regs);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b4044469527e..cab678ed6618 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -285,15 +285,24 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = {
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
+PMU_FORMAT_ATTR(rdpmc, "config1:1");
+
+static int sysctl_perf_user_access __read_mostly;
static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
return event->attr.config1 & 0x1;
}
+static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+{
+ return event->attr.config1 & 0x2;
+}
+
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
&format_attr_long.attr,
+ &format_attr_rdpmc.attr,
NULL,
};
@@ -362,7 +371,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
*/
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
-
+#define ARMV8_IDX_CYCLE_COUNTER_USER 32
/*
* We unconditionally enable ARMv8.5-PMU long event counter support
@@ -374,18 +383,22 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
}
+static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+{
+ return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
/*
* We must chain two programmable counters for 64 bit events,
* except when we have allocated the 64bit cycle counter (for CPU
- * cycles event). This must be called only when the event has
- * a counter allocated.
+ * cycles event) or when user space counter access is enabled.
*/
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- return !WARN_ON(idx < 0) &&
+ return !armv8pmu_event_has_user_read(event) &&
armv8pmu_event_is_64bit(event) &&
!armv8pmu_has_long_event(cpu_pmu) &&
(idx != ARMV8_IDX_CYCLE_COUNTER);
@@ -718,6 +731,28 @@ static inline u32 armv8pmu_getreset_flags(void)
return value;
}
+static void armv8pmu_disable_user_access(void)
+{
+ write_sysreg(0, pmuserenr_el0);
+}
+
+static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
+{
+ int i;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+
+ /* Clear any unused counters to avoid leaking their contents */
+ for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
+ if (i == ARMV8_IDX_CYCLE_COUNTER)
+ write_sysreg(0, pmccntr_el0);
+ else
+ armv8pmu_write_evcntr(i, 0);
+ }
+
+ write_sysreg(0, pmuserenr_el0);
+ write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
+}
+
static void armv8pmu_enable_event(struct perf_event *event)
{
/*
@@ -761,6 +796,14 @@ static void armv8pmu_disable_event(struct perf_event *event)
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
+ struct perf_event_context *task_ctx =
+ this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx;
+
+ if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user)
+ armv8pmu_enable_user_access(cpu_pmu);
+ else
+ armv8pmu_disable_user_access();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}
@@ -878,13 +921,16 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return ARMV8_IDX_CYCLE_COUNTER;
+ else if (armv8pmu_event_is_64bit(event) &&
+ armv8pmu_event_want_user_access(event) &&
+ !armv8pmu_has_long_event(cpu_pmu))
+ return -EAGAIN;
}
/*
* Otherwise use events counters
*/
- if (armv8pmu_event_is_64bit(event) &&
- !armv8pmu_has_long_event(cpu_pmu))
+ if (armv8pmu_event_is_chained(event))
return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
else
return armv8pmu_get_single_idx(cpuc, cpu_pmu);
@@ -900,6 +946,22 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
clear_bit(idx - 1, cpuc->used_mask);
}
+static int armv8pmu_user_event_idx(struct perf_event *event)
+{
+ if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
+ return 0;
+
+ /*
+ * We remap the cycle counter index to 32 to
+ * match the offset applied to the rest of
+ * the counter indices.
+ */
+ if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
+ return ARMV8_IDX_CYCLE_COUNTER_USER;
+
+ return event->hw.idx;
+}
+
/*
* Add an event filter to a given event.
*/
@@ -996,6 +1058,25 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
if (armv8pmu_event_is_64bit(event))
event->hw.flags |= ARMPMU_EVT_64BIT;
+ /*
+ * User events must be allocated into a single counter, and so
+ * must not be chained.
+ *
+ * Most 64-bit events require long counter support, but 64-bit
+ * CPU_CYCLES events can be placed into the dedicated cycle
+ * counter when this is free.
+ */
+ if (armv8pmu_event_want_user_access(event)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
+ return -EINVAL;
+ if (armv8pmu_event_is_64bit(event) &&
+ (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
+ !armv8pmu_has_long_event(armpmu))
+ return -EOPNOTSUPP;
+
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+ }
+
/* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
@@ -1104,6 +1185,43 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
return probe.present ? 0 : -ENODEV;
}
+static void armv8pmu_disable_user_access_ipi(void *unused)
+{
+ armv8pmu_disable_user_access();
+}
+
+static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write || sysctl_perf_user_access)
+ return ret;
+
+ on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
+ return 0;
+}
+
+static struct ctl_table armv8_pmu_sysctl_table[] = {
+ {
+ .procname = "perf_user_access",
+ .data = &sysctl_perf_user_access,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = armv8pmu_proc_user_access_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static void armv8_pmu_register_sysctl_table(void)
+{
+ static u32 tbl_registered = 0;
+
+ if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
+ register_sysctl("kernel", armv8_pmu_sysctl_table);
+}
+
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
int (*map_event)(struct perf_event *event),
const struct attribute_group *events,
@@ -1127,6 +1245,8 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match;
+ cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
+
cpu_pmu->name = name;
cpu_pmu->map_event = map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
@@ -1136,6 +1256,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
caps : &armv8_pmuv3_caps_attr_group;
+ armv8_pmu_register_sysctl_table();
return 0;
}
@@ -1145,17 +1266,32 @@ static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}
-static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
- armv8_pmuv3_map_event);
+#define PMUV3_INIT_SIMPLE(name) \
+static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
+{ \
+ return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
}
-static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
- armv8_pmuv3_map_event);
-}
+PMUV3_INIT_SIMPLE(armv8_pmuv3)
+
+PMUV3_INIT_SIMPLE(armv8_cortex_a34)
+PMUV3_INIT_SIMPLE(armv8_cortex_a55)
+PMUV3_INIT_SIMPLE(armv8_cortex_a65)
+PMUV3_INIT_SIMPLE(armv8_cortex_a75)
+PMUV3_INIT_SIMPLE(armv8_cortex_a76)
+PMUV3_INIT_SIMPLE(armv8_cortex_a77)
+PMUV3_INIT_SIMPLE(armv8_cortex_a78)
+PMUV3_INIT_SIMPLE(armv9_cortex_a510)
+PMUV3_INIT_SIMPLE(armv9_cortex_a710)
+PMUV3_INIT_SIMPLE(armv8_cortex_x1)
+PMUV3_INIT_SIMPLE(armv9_cortex_x2)
+PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
+PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
+PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
+
+PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
+PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
@@ -1169,24 +1305,12 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
armv8_a53_map_event);
}
-static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
- armv8_pmuv3_map_event);
-}
-
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
armv8_a57_map_event);
}
-static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
- armv8_pmuv3_map_event);
-}
-
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
@@ -1199,42 +1323,6 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
armv8_a73_map_event);
}
-static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
- armv8_pmuv3_map_event);
-}
-
-static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
- armv8_pmuv3_map_event);
-}
-
-static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
- armv8_pmuv3_map_event);
-}
-
-static int armv8_a78_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a78",
- armv8_pmuv3_map_event);
-}
-
-static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
- armv8_pmuv3_map_event);
-}
-
-static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
- armv8_pmuv3_map_event);
-}
-
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
@@ -1248,23 +1336,31 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
- {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
- {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init},
+ {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
+ {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
- {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init},
+ {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
- {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init},
+ {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
- {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
- {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
- {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
- {.compatible = "arm,cortex-a78-pmu", .data = armv8_a78_pmu_init},
- {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
- {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
+ {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
+ {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
+ {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
+ {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
+ {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
+ {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
+ {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
+ {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
+ {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
+ {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
+ {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
+ {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
+ {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
+ {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
{},
};
@@ -1287,7 +1383,7 @@ static int __init armv8_pmu_driver_init(void)
if (acpi_disabled)
return platform_driver_register(&armv8_pmu_driver);
else
- return arm_pmu_acpi_probe(armv8_pmuv3_init);
+ return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
}
device_initcall(armv8_pmu_driver_init)
@@ -1301,6 +1397,14 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_time_short = 0;
+ userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);
+
+ if (userpg->cap_user_rdpmc) {
+ if (event->hw.flags & ARMPMU_EVT_64BIT)
+ userpg->pmc_width = 64;
+ else
+ userpg->pmc_width = 32;
+ }
do {
rd = sched_clock_read_begin(&seq);
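With the perf_event.c changes above, a task-bound event can request userspace counter access by setting the new "rdpmc" format bit (config1 bit 1), and access is only actually enabled while /proc/sys/kernel/perf_user_access is 1. A minimal sketch of the userspace side, using the standard perf_event_open()/mmap() interface (not part of this diff) and only checking the capability bits exported through arch_perf_update_userpage():

/*
 * Sketch: open a task-bound cycles event with the rdpmc request bit
 * (config1 bit 1) and inspect the mmap'd perf page. Reading the counter
 * directly with mrs is omitted here.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pg;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.config1 = 0x2;	/* request userspace (rdpmc-style) access */

	/* Task-bound, any CPU: user access is only granted for task events. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	pg = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pg == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("cap_user_rdpmc=%u index=%u pmc_width=%u\n",
	       (unsigned)pg->cap_user_rdpmc, (unsigned)pg->index,
	       (unsigned)pg->pmc_width);
	return 0;
}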
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index aacf2f5559a8..5369e649fa79 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -40,6 +40,7 @@
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
+#include <linux/stacktrace.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -439,34 +440,26 @@ static void entry_task_switch(struct task_struct *next)
/*
* ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
- * Assuming the virtual counter is enabled at the beginning of times:
- *
- * - disable access when switching from a 64bit task to a 32bit task
- * - enable access when switching from a 32bit task to a 64bit task
+ * Ensure access is disabled when switching to a 32bit task, ensure
+ * access is enabled when switching to a 64bit task.
*/
-static void erratum_1418040_thread_switch(struct task_struct *prev,
- struct task_struct *next)
+static void erratum_1418040_thread_switch(struct task_struct *next)
{
- bool prev32, next32;
- u64 val;
-
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
- return;
-
- prev32 = is_compat_thread(task_thread_info(prev));
- next32 = is_compat_thread(task_thread_info(next));
-
- if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
+ !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
return;
- val = read_sysreg(cntkctl_el1);
-
- if (!next32)
- val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ if (is_compat_thread(task_thread_info(next)))
+ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
else
- val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
+}
- write_sysreg(val, cntkctl_el1);
+static void erratum_1418040_new_exec(void)
+{
+ preempt_disable();
+ erratum_1418040_thread_switch(current);
+ preempt_enable();
}
/*
@@ -490,7 +483,8 @@ void update_sctlr_el1(u64 sctlr)
/*
* Thread switching.
*/
-__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+__notrace_funcgraph __sched
+struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
@@ -501,7 +495,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
ssbs_thread_switch(next);
- erratum_1418040_thread_switch(prev, next);
+ erratum_1418040_thread_switch(next);
ptrauth_thread_switch_user(next);
/*
@@ -528,30 +522,37 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
+struct wchan_info {
+ unsigned long pc;
+ int count;
+};
+
+static bool get_wchan_cb(void *arg, unsigned long pc)
+{
+ struct wchan_info *wchan_info = arg;
+
+ if (!in_sched_functions(pc)) {
+ wchan_info->pc = pc;
+ return false;
+ }
+ return wchan_info->count++ < 16;
+}
+
unsigned long __get_wchan(struct task_struct *p)
{
- struct stackframe frame;
- unsigned long stack_page, ret = 0;
- int count = 0;
+ struct wchan_info wchan_info = {
+ .pc = 0,
+ .count = 0,
+ };
- stack_page = (unsigned long)try_get_task_stack(p);
- if (!stack_page)
+ if (!try_get_task_stack(p))
return 0;
- start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
-
- do {
- if (unwind_frame(p, &frame))
- goto out;
- if (!in_sched_functions(frame.pc)) {
- ret = frame.pc;
- goto out;
- }
- } while (count++ < 16);
+ arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);
-out:
put_task_stack(p);
- return ret;
+
+ return wchan_info.pc;
}
unsigned long arch_align_stack(unsigned long sp)
@@ -611,6 +612,7 @@ void arch_setup_new_exec(void)
current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
+ erratum_1418040_new_exec();
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 88a9034fb9b5..39dbdfdc38d3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -812,9 +812,9 @@ static int sve_set(struct task_struct *target,
/*
* Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
- * sve_set_vector_length(), which will also validate them for us:
+ * vec_set_vector_length(), which will also validate them for us:
*/
- ret = sve_set_vector_length(target, header.vl,
+ ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl,
((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
if (ret)
goto out;
@@ -1839,7 +1839,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
int syscall_trace_enter(struct pt_regs *regs)
{
- unsigned long flags = READ_ONCE(current_thread_info()->flags);
+ unsigned long flags = read_thread_flags();
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1862,7 +1862,7 @@ int syscall_trace_enter(struct pt_regs *regs)
void syscall_trace_exit(struct pt_regs *regs)
{
- unsigned long flags = READ_ONCE(current_thread_info()->flags);
+ unsigned long flags = read_thread_flags();
audit_syscall_exit(regs);
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index a6d18755652f..68330017d04f 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -9,9 +9,9 @@
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
+#include <linux/stacktrace.h>
#include <asm/stack_pointer.h>
-#include <asm/stacktrace.h>
struct return_address_data {
unsigned int level;
@@ -35,15 +35,11 @@ NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
struct return_address_data data;
- struct stackframe frame;
data.level = level + 2;
data.addr = NULL;
- start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)return_address);
- walk_stackframe(current, &frame, save_return_addr, &data);
+ arch_stack_walk(save_return_addr, &data, current, NULL);
if (!data.level)
return data.addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index be5f85b0a24d..f70573928f1b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -189,11 +189,16 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("\n"
- "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+ "Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n"
"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
"\nPlease check your bootloader.",
&dt_phys, dt_virt);
+ /*
+ * Note that in this _really_ early stage we cannot even BUG()
+ * or oops, so the least terrible thing to do is cpu_relax(),
+ * or else we could end-up printing non-initialized data, etc.
+ */
while (true)
cpu_relax();
}
@@ -232,12 +237,14 @@ static void __init request_standard_resources(void)
if (memblock_is_nomap(region)) {
res->name = "reserved";
res->flags = IORESOURCE_MEM;
+ res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
} else {
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
}
- res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
- res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
request_resource(&iomem_resource, res);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 8f6372b44b65..d8aaf4b6f432 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -948,7 +948,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
}
local_daif_mask();
- thread_flags = READ_ONCE(current_thread_info()->flags);
+ thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c30624fff6ac..0fb58fed54cb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -33,14 +33,11 @@
*/
-void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc)
+static void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc)
{
frame->fp = fp;
frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame->graph = 0;
-#endif
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
#endif
@@ -66,7 +63,8 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+static int notrace unwind_frame(struct task_struct *tsk,
+ struct stackframe *frame)
{
unsigned long fp = frame->fp;
struct stack_info info;
@@ -116,20 +114,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->prev_fp = fp;
frame->prev_type = info.type;
+ frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
- struct ftrace_ret_stack *ret_stack;
+ (frame->pc == (unsigned long)return_to_handler)) {
+ unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
- ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
- if (WARN_ON_ONCE(!ret_stack))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+ (void *)frame->fp);
+ if (WARN_ON_ONCE(frame->pc == orig_pc))
return -EINVAL;
- frame->pc = ret_stack->ret;
+ frame->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
@@ -137,14 +138,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
-void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- bool (*fn)(void *, unsigned long), void *data)
+static void notrace walk_stackframe(struct task_struct *tsk,
+ struct stackframe *frame,
+ bool (*fn)(void *, unsigned long), void *data)
{
while (1) {
int ret;
@@ -158,24 +158,20 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
}
NOKPROBE_SYMBOL(walk_stackframe);
-static void dump_backtrace_entry(unsigned long where, const char *loglvl)
+static bool dump_backtrace_entry(void *arg, unsigned long where)
{
+ char *loglvl = arg;
printk("%s %pSb\n", loglvl, (void *)where);
+ return true;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl)
{
- struct stackframe frame;
- int skip = 0;
-
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
- if (regs) {
- if (user_mode(regs))
- return;
- skip = 1;
- }
+ if (regs && user_mode(regs))
+ return;
if (!tsk)
tsk = current;
@@ -183,36 +179,8 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
if (!try_get_task_stack(tsk))
return;
- if (tsk == current) {
- start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)dump_backtrace);
- } else {
- /*
- * task blocked in __switch_to
- */
- start_backtrace(&frame,
- thread_saved_fp(tsk),
- thread_saved_pc(tsk));
- }
-
printk("%sCall trace:\n", loglvl);
- do {
- /* skip until specified stack frame */
- if (!skip) {
- dump_backtrace_entry(frame.pc, loglvl);
- } else if (frame.fp == regs->regs[29]) {
- skip = 0;
- /*
- * Mostly, this is the case where this function is
- * called in panic/abort. As exception handler's
- * stack frame does not contain the corresponding pc
- * at which an exception has taken place, use regs->pc
- * instead.
- */
- dump_backtrace_entry(regs->pc, loglvl);
- }
- } while (!unwind_frame(tsk, &frame));
+ arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
put_task_stack(tsk);
}
@@ -223,8 +191,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
barrier();
}
-#ifdef CONFIG_STACKTRACE
-
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
@@ -243,5 +209,3 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
walk_stackframe(task, &frame, consume_entry, cookie);
}
-
-#endif
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 50a0f1a38e84..c938603b3ba0 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -81,7 +81,7 @@ void syscall_trace_exit(struct pt_regs *regs);
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
- unsigned long flags = current_thread_info()->flags;
+ unsigned long flags = read_thread_flags();
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
@@ -148,7 +148,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
*/
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
- flags = current_thread_info()->flags;
+ flags = read_thread_flags();
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
local_daif_restore(DAIF_PROCCTX);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index eebbc8d7123e..b5855eb7435d 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -18,6 +18,7 @@
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/profile.h>
+#include <linux/stacktrace.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <linux/irq.h>
@@ -29,25 +30,25 @@
#include <clocksource/arm_arch_timer.h>
#include <asm/thread_info.h>
-#include <asm/stacktrace.h>
#include <asm/paravirt.h>
-unsigned long profile_pc(struct pt_regs *regs)
+static bool profile_pc_cb(void *arg, unsigned long pc)
{
- struct stackframe frame;
+ unsigned long *prof_pc = arg;
- if (!in_lock_functions(regs->pc))
- return regs->pc;
+ if (in_lock_functions(pc))
+ return true;
+ *prof_pc = pc;
+ return false;
+}
- start_backtrace(&frame, regs->regs[29], regs->pc);
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long prof_pc = 0;
- do {
- int ret = unwind_frame(NULL, &frame);
- if (ret < 0)
- return 0;
- } while (in_lock_functions(frame.pc));
+ arch_stack_walk(profile_pc_cb, &prof_pc, current, regs);
- return frame.pc;
+ return prof_pc;
}
EXPORT_SYMBOL(profile_pc);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7b21213a570f..e8986e6067a9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -994,7 +994,7 @@ static struct break_hook bug_break_hook = {
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
- in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+ "Kernel text patching",
(void *)instruction_pointer(regs));
/* We cannot handle this */
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 700767dfd221..60813497a381 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,6 +32,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
$(CC_FLAGS_LTO)
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n