Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/.gitignore             |   1
-rw-r--r--  arch/arm64/kernel/Makefile               |   2
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c     |   4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c          |  16
-rw-r--r--  arch/arm64/kernel/cpu-reset.S            |   2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c           |  18
-rw-r--r--  arch/arm64/kernel/cpu_ops.c              |  11
-rw-r--r--  arch/arm64/kernel/cpufeature.c           | 165
-rw-r--r--  arch/arm64/kernel/cpuidle.c              |   9
-rw-r--r--  arch/arm64/kernel/efi-entry.S            |  90
-rw-r--r--  arch/arm64/kernel/efi-header.S           |   6
-rw-r--r--  arch/arm64/kernel/entry-common.c         |   2
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S         |  48
-rw-r--r--  arch/arm64/kernel/entry.S                | 121
-rw-r--r--  arch/arm64/kernel/head.S                 |  86
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S        |   2
-rw-r--r--  arch/arm64/kernel/hibernate.c            |  13
-rw-r--r--  arch/arm64/kernel/hyp-stub.S             |   2
-rw-r--r--  arch/arm64/kernel/image-vars.h           |   7
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c   |   2
-rw-r--r--  arch/arm64/kernel/perf_event.c           | 338
-rw-r--r--  arch/arm64/kernel/pointer_auth.c         |   7
-rw-r--r--  arch/arm64/kernel/process.c              |   9
-rw-r--r--  arch/arm64/kernel/ptrace.c               |  16
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S      |   4
-rw-r--r--  arch/arm64/kernel/setup.c                |   8
-rw-r--r--  arch/arm64/kernel/sleep.S                |   2
-rw-r--r--  arch/arm64/kernel/smp.c                  | 159
-rw-r--r--  arch/arm64/kernel/stacktrace.c           |   5
-rw-r--r--  arch/arm64/kernel/topology.c             | 180
-rw-r--r--  arch/arm64/kernel/vdso.c                 |  13
-rw-r--r--  arch/arm64/kernel/vdso/.gitignore        |   1
-rw-r--r--  arch/arm64/kernel/vdso/Makefile          |   2
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S       |   4
-rw-r--r--  arch/arm64/kernel/vdso/vgettimeofday.c   |   2
-rw-r--r--  arch/arm64/kernel/vdso32/.gitignore      |   1
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile        |  11
-rw-r--r--  arch/arm64/kernel/vdso32/sigreturn.S     |  23
-rw-r--r--  arch/arm64/kernel/vdso32/vgettimeofday.c |  14
39 files changed, 859 insertions(+), 547 deletions(-)
diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arm64/kernel/.gitignore
+++ b/arch/arm64/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index fc6488660f64..4e5b8ee31442 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -21,7 +21,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
syscall.o
-extra-$(CONFIG_EFI) := efi-entry.o
+targets += efi-entry.o
OBJCOPYFLAGS := --prefix-symbols=__efistub_
$(obj)/%.stub.o: $(obj)/%.o FORCE
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7832b3216370..c19aa81ddc8c 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = {
},
{
/* Thumb mode */
- .instr_mask = 0x0000fff7,
+ .instr_mask = 0xfffffff7,
.instr_val = 0x0000b650,
.pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
.pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
@@ -630,7 +630,7 @@ static int __init armv8_deprecated_init(void)
register_insn_emulation(&cp15_barrier_ops);
if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
- if(system_supports_mixed_endian_el0())
+ if (system_supports_mixed_endian_el0())
register_insn_emulation(&setend_ops);
else
pr_info("setend instruction emulation is not supported on this system\n");
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a5bdce8af65b..9981a0a5a87f 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -40,6 +40,10 @@ int main(void)
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
+#ifdef CONFIG_ARM64_PTR_AUTH
+ DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
+ DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
+#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
@@ -88,6 +92,9 @@ int main(void)
BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
+#ifdef CONFIG_ARM64_PTR_AUTH
+ DEFINE(CPU_BOOT_PTRAUTH_KEY, offsetof(struct secondary_data, ptrauth_key));
+#endif
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
@@ -128,5 +135,14 @@ int main(void)
DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs));
DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority));
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
+ DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
+ DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
+ DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
+ DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
+ DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
+ BLANK();
+#endif
return 0;
}
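
Note: the new THREAD_KEYS_* and PTRAUTH_*_KEY_* offsets above correspond to the pointer-authentication key state carried in task_struct and secondary_data. As a rough sketch of the layout these offsets imply (assumption: the real definitions live in arch/arm64/include/asm/pointer_auth.h and may differ in detail):

struct ptrauth_key {
	unsigned long lo, hi;		/* one 128-bit key, stored as two halves */
};

struct ptrauth_keys_user {		/* per-task EL0 keys */
	struct ptrauth_key apia, apib, apda, apdb, apga;
};

struct ptrauth_keys_kernel {		/* in-kernel return-address key */
	struct ptrauth_key apia;
};

The entry.S and head.S changes later in this diff load these fields via the generated offsets when installing keys on kernel entry/exit and on context switch.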
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 32c7bf858dd9..38087b4c0432 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -32,7 +32,7 @@
ENTRY(__cpu_soft_restart)
/* Clear sctlr_el1 flags. */
mrs x12, sctlr_el1
- ldr x13, =SCTLR_ELx_FLAGS
+ mov_q x13, SCTLR_ELx_FLAGS
bic x12, x12, x13
pre_disable_mmu_workaround
msr sctlr_el1, x12
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 703ad0a84f99..df56d2295d16 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -11,6 +11,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
static bool __maybe_unused
@@ -113,13 +114,10 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
- void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
int i;
for (i = 0; i < SZ_2K; i += 0x80)
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
raw_spin_unlock(&bp_lock);
}
#else
-#define __smccc_workaround_1_smc_start NULL
-#define __smccc_workaround_1_smc_end NULL
-
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -176,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
#include <linux/arm-smccc.h>
-static void call_smc_arch_workaround_1(void)
+static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
smccc_end = NULL;
break;
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
case SMCCC_CONDUIT_SMC:
cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
+ smccc_start = __smccc_workaround_1_smc;
+ smccc_end = __smccc_workaround_1_smc +
+ __SMCCC_WORKAROUND_1_SMC_SZ;
break;
+#endif
default:
return -1;
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index 7e07072757af..e133011f64b5 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -15,10 +15,12 @@
#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern const struct cpu_operations acpi_parking_protocol_ops;
+#endif
extern const struct cpu_operations cpu_psci_ops;
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
&smp_spin_table_ops,
@@ -94,7 +96,7 @@ static const char *__init cpu_read_enable_method(int cpu)
/*
* Read a cpu's enable method and record it in cpu_ops.
*/
-int __init cpu_read_ops(int cpu)
+int __init init_cpu_ops(int cpu)
{
const char *enable_method = cpu_read_enable_method(cpu);
@@ -109,3 +111,8 @@ int __init cpu_read_ops(int cpu)
return 0;
}
+
+const struct cpu_operations *get_cpu_ops(int cpu)
+{
+ return cpu_ops[cpu];
+}
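
Note: with cpu_ops[] made static, users outside cpu_ops.c now go through the get_cpu_ops() accessor. A minimal sketch of the caller-side pattern (the function name is illustrative; the real conversions are in cpuidle.c below and in smp.c/setup.c elsewhere in this series):

static int example_boot_secondary(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (!ops || !ops->cpu_boot)
		return -EOPNOTSUPP;

	return ops->cpu_boot(cpu);	/* e.g. PSCI CPU_ON or spin-table release */
}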
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0b6715625cf6..9fac745aa7bb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
+static bool __system_matches_cap(unsigned int n);
+
/*
* NOTE: Any changes to the visibility of features should be kept in
* sync with the documentation of the CPU feature register ABI.
@@ -163,6 +165,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
@@ -551,7 +554,7 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
BUG_ON(!reg);
- for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+ for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
u64 ftr_mask = arm64_ftr_mask(ftrp);
s64 ftr_new = arm64_ftr_value(ftrp, new);
@@ -1222,6 +1225,57 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
#endif
+#ifdef CONFIG_ARM64_AMU_EXTN
+
+/*
+ * The "amu_cpus" cpumask only signals that the CPU implementation for the
+ * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
+ * information regarding all the events that it supports. When a CPU bit is
+ * set in the cpumask, the user of this feature can only rely on the presence
+ * of the 4 fixed counters for that CPU. But this does not guarantee that the
+ * counters are enabled or access to these counters is enabled by code
+ * executed at higher exception levels (firmware).
+ */
+static struct cpumask amu_cpus __read_mostly;
+
+bool cpu_has_amu_feat(int cpu)
+{
+ return cpumask_test_cpu(cpu, &amu_cpus);
+}
+
+/* Initialize the use of AMU counters for frequency invariance */
+extern void init_cpu_freq_invariance_counters(void);
+
+static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
+{
+ if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
+ pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
+ smp_processor_id());
+ cpumask_set_cpu(smp_processor_id(), &amu_cpus);
+ init_cpu_freq_invariance_counters();
+ }
+}
+
+static bool has_amu(const struct arm64_cpu_capabilities *cap,
+ int __unused)
+{
+ /*
+ * The AMU extension is a non-conflicting feature: the kernel can
+ * safely run a mix of CPUs with and without support for the
+ * activity monitors extension. Therefore, unconditionally enable
+ * the capability to allow any late CPU to use the feature.
+ *
+ * With this feature unconditionally enabled, the cpu_enable
+ * function will be called for all CPUs that match the criteria,
+ * including secondary and hotplugged, marking this feature as
+ * present on that respective CPU. The enable function will also
+ * print a detection message.
+ */
+
+ return true;
+}
+#endif
+
#ifdef CONFIG_ARM64_VHE
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
@@ -1316,10 +1370,18 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_PTR_AUTH
-static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
+ int __unused)
{
- sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
- SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+ return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
+ __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+}
+
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+ __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
}
#endif /* CONFIG_ARM64_PTR_AUTH */
@@ -1347,6 +1409,25 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
}
#endif
+/* Internal helper functions to match cpu capability type */
+static bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1499,6 +1580,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_clear_disr,
},
#endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_AMU_EXTN
+ {
+ /*
+ * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
+ * Therefore, don't provide .desc as we don't want the detection
+ * message to be shown until at least one CPU is detected to
+ * support the feature.
+ */
+ .capability = ARM64_HAS_AMU_EXTN,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_amu,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_AMU_SHIFT,
+ .min_field_value = ID_AA64PFR0_AMU,
+ .cpu_enable = cpu_amu_enable,
+ },
+#endif /* CONFIG_ARM64_AMU_EXTN */
{
.desc = "Data cache clean to the PoU not required for I/D coherence",
.capability = ARM64_HAS_CACHE_IDC,
@@ -1592,24 +1691,27 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "Address authentication (architected algorithm)",
.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.sys_reg = SYS_ID_AA64ISAR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_APA_SHIFT,
.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
.matches = has_cpuid_feature,
- .cpu_enable = cpu_enable_address_auth,
},
{
.desc = "Address authentication (IMP DEF algorithm)",
.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.sys_reg = SYS_ID_AA64ISAR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_API_SHIFT,
.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
.matches = has_cpuid_feature,
- .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .capability = ARM64_HAS_ADDRESS_AUTH,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_address_auth,
},
{
.desc = "Generic authentication (architected algorithm)",
@@ -1631,6 +1733,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
.matches = has_cpuid_feature,
},
+ {
+ .capability = ARM64_HAS_GENERIC_AUTH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_generic_auth,
+ },
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_PSEUDO_NMI
{
@@ -1980,10 +2087,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
* Run through the list of capabilities to check for conflicts.
* If the system has already detected a capability, take necessary
* action on this CPU.
- *
- * Returns "false" on conflicts.
*/
-static bool verify_local_cpu_caps(u16 scope_mask)
+static void verify_local_cpu_caps(u16 scope_mask)
{
int i;
bool cpu_has_cap, system_has_cap;
@@ -2028,10 +2133,12 @@ static bool verify_local_cpu_caps(u16 scope_mask)
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
smp_processor_id(), caps->capability,
caps->desc, system_has_cap, cpu_has_cap);
- return false;
- }
- return true;
+ if (cpucap_panic_on_conflict(caps))
+ cpu_panic_kernel();
+ else
+ cpu_die_early();
+ }
}
/*
@@ -2041,12 +2148,8 @@ static bool verify_local_cpu_caps(u16 scope_mask)
static void check_early_cpu_features(void)
{
verify_cpu_asid_bits();
- /*
- * Early features are used by the kernel already. If there
- * is a conflict, we cannot proceed further.
- */
- if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
- cpu_panic_kernel();
+
+ verify_local_cpu_caps(SCOPE_BOOT_CPU);
}
static void
@@ -2094,8 +2197,7 @@ static void verify_local_cpu_capabilities(void)
* check_early_cpu_features(), as they need to be verified
* on all secondary CPUs.
*/
- if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
- cpu_die_early();
+ verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
verify_local_elf_hwcaps(arm64_elf_hwcaps);
@@ -2146,6 +2248,23 @@ bool this_cpu_has_cap(unsigned int n)
return false;
}
+/*
+ * This helper function is used in a narrow window when,
+ * - The system wide safe registers are set with all the SMP CPUs and,
+ * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
+ * In all other cases cpus_have_{const_}cap() should be used.
+ */
+static bool __system_matches_cap(unsigned int n)
+{
+ if (n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_SYSTEM);
+ }
+ return false;
+}
+
void cpu_set_feature(unsigned int num)
{
WARN_ON(num >= MAX_CPU_FEATURES);
@@ -2218,7 +2337,7 @@ void __init setup_cpu_features(void)
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
- return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+ return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
}
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
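
Note: the AMU capability added above is deliberately weak. cpu_has_amu_feat() only reports that a given CPU exposes the four fixed counters, so consumers must check each CPU before touching them. A hedged sketch of that consumer-side pattern (hypothetical helper; the real user is the frequency-invariance code in topology.c, changed elsewhere in this series):

static bool all_online_cpus_have_amu(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (!cpu_has_amu_feat(cpu))
			return false;	/* at least one CPU lacks the counters */

	return true;
}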
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index e4d6af2fdec7..b512b5503f6e 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -18,11 +18,11 @@
int arm_cpuidle_init(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
int ret = -EOPNOTSUPP;
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
- cpu_ops[cpu]->cpu_init_idle)
- ret = cpu_ops[cpu]->cpu_init_idle(cpu);
+ if (ops && ops->cpu_suspend && ops->cpu_init_idle)
+ ret = ops->cpu_init_idle(cpu);
return ret;
}
@@ -37,8 +37,9 @@ int arm_cpuidle_init(unsigned int cpu)
int arm_cpuidle_suspend(int index)
{
int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
- return cpu_ops[cpu]->cpu_suspend(index);
+ return ops->cpu_suspend(index);
}
#ifdef CONFIG_ACPI
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 304d5b02ca67..1a03618df0df 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -10,81 +10,35 @@
#include <asm/assembler.h>
-#define EFI_LOAD_ERROR 0x8000000000000001
-
__INIT
- /*
- * We arrive here from the EFI boot manager with:
- *
- * * CPU in little-endian mode
- * * MMU on with identity-mapped RAM
- * * Icache and Dcache on
- *
- * We will most likely be running from some place other than where
- * we want to be. The kernel image wants to be placed at TEXT_OFFSET
- * from start of RAM.
- */
-ENTRY(entry)
- /*
- * Create a stack frame to save FP/LR with extra space
- * for image_addr variable passed to efi_entry().
- */
- stp x29, x30, [sp, #-32]!
- mov x29, sp
-
- /*
- * Call efi_entry to do the real work.
- * x0 and x1 are already set up by firmware. Current runtime
- * address of image is calculated and passed via *image_addr.
- *
- * unsigned long efi_entry(void *handle,
- * efi_system_table_t *sys_table,
- * unsigned long *image_addr) ;
- */
- adr_l x8, _text
- add x2, sp, 16
- str x8, [x2]
- bl efi_entry
- cmn x0, #1
- b.eq efi_load_fail
-
+SYM_CODE_START(efi_enter_kernel)
/*
* efi_entry() will have copied the kernel image if necessary and we
- * return here with device tree address in x0 and the kernel entry
- * point stored at *image_addr. Save those values in registers which
- * are callee preserved.
- */
- mov x20, x0 // DTB address
- ldr x0, [sp, #16] // relocated _text address
- ldr w21, =stext_offset
- add x21, x0, x21
-
- /*
- * Calculate size of the kernel Image (same for original and copy).
+ * end up here with device tree address in x1 and the kernel entry
+ * point stored in x0. Save those values in registers which are
+ * callee preserved.
*/
- adr_l x1, _text
- adr_l x2, _edata
- sub x1, x2, x1
+ ldr w2, =stext_offset
+ add x19, x0, x2 // relocated Image entrypoint
+ mov x20, x1 // DTB address
/*
- * Flush the copied Image to the PoC, and ensure it is not shadowed by
+ * Clean the copied Image to the PoC, and ensure it is not shadowed by
* stale icache entries from before relocation.
*/
- bl __flush_dcache_area
+ ldr w1, =kernel_size
+ bl __clean_dcache_area_poc
ic ialluis
/*
- * Ensure that the rest of this function (in the original Image) is
- * visible when the caches are disabled. The I-cache can't have stale
- * entries for the VA range of the current image, so no maintenance is
- * necessary.
+ * Clean the remainder of this routine to the PoC
+ * so that we can safely disable the MMU and caches.
*/
- adr x0, entry
- adr x1, entry_end
- sub x1, x1, x0
- bl __flush_dcache_area
-
+ adr x0, 0f
+ ldr w1, 3f
+ bl __clean_dcache_area_poc
+0:
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -109,12 +63,6 @@ ENTRY(entry)
mov x1, xzr
mov x2, xzr
mov x3, xzr
- br x21
-
-efi_load_fail:
- mov x0, #EFI_LOAD_ERROR
- ldp x29, x30, [sp], #32
- ret
-
-entry_end:
-ENDPROC(entry)
+ br x19
+SYM_CODE_END(efi_enter_kernel)
+3: .long . - 0b
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
index a7cfacce3e15..914999ccaf8a 100644
--- a/arch/arm64/kernel/efi-header.S
+++ b/arch/arm64/kernel/efi-header.S
@@ -27,7 +27,7 @@ optional_header:
.long __initdata_begin - efi_header_end // SizeOfCode
.long __pecoff_data_size // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
- .long __efistub_entry - _head // AddressOfEntryPoint
+ .long __efistub_efi_entry - _head // AddressOfEntryPoint
.long efi_header_end - _head // BaseOfCode
extra_header_fields:
@@ -36,8 +36,8 @@ extra_header_fields:
.long PECOFF_FILE_ALIGNMENT // FileAlignment
.short 0 // MajorOperatingSystemVersion
.short 0 // MinorOperatingSystemVersion
- .short 0 // MajorImageVersion
- .short 0 // MinorImageVersion
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
.short 0 // MajorSubsystemVersion
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index fde59981445c..c839b5bf1904 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -175,7 +175,7 @@ NOKPROBE_SYMBOL(el0_pc);
static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(regs->sp, esr, regs);
}
NOKPROBE_SYMBOL(el0_sp);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 7d02f9966d34..833d48c9acb5 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -75,27 +75,27 @@
add x29, sp, #S_STACKFRAME
.endm
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
ftrace_regs_entry 1
b ftrace_common
-ENDPROC(ftrace_regs_caller)
+SYM_CODE_END(ftrace_regs_caller)
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
ftrace_regs_entry 0
b ftrace_common
-ENDPROC(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
-ENTRY(ftrace_common)
+SYM_CODE_START(ftrace_common)
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
ldr_l x2, function_trace_op // op
mov x3, sp // regs
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -122,17 +122,17 @@ ftrace_common_return:
add sp, sp, #S_FRAME_SIZE + 16
ret x9
-ENDPROC(ftrace_common)
+SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
-ENDPROC(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
#endif
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
mcount_enter
ldr_l x2, ftrace_trace_function
@@ -242,7 +242,7 @@ skip_ftrace_call: // }
b.ne ftrace_graph_caller // ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -253,9 +253,9 @@ NOKPROBE(_mcount)
* and later on, NOP to branch to ftrace_caller() when enabled or branch to
* NOP when disabled per-function base.
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -268,24 +268,24 @@ NOKPROBE(_mcount)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
mcount_enter
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
-GLOBAL(ftrace_call) // tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -298,20 +298,20 @@ ENDPROC(ftrace_caller)
* the call stack in order to intercept instrumented function's return path
* and run return_to_handler() later on its exit.
*/
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
mcount_get_pc x0 // function's pc
mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -320,7 +320,7 @@ ENDPROC(ftrace_stub)
* Run ftrace_return_to_handler() before going back to parent.
* @fp is checked against the value passed by ftrace_graph_caller().
*/
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
/* save return value regs */
sub sp, sp, #64
stp x0, x1, [sp]
@@ -340,5 +340,5 @@ ENTRY(return_to_handler)
add sp, sp, #64
ret
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9461d812ae27..ddcde093c433 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -14,6 +14,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
@@ -177,6 +178,7 @@ alternative_cb_end
apply_ssbd 1, x22, x23
+ ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
.else
add x21, sp, #S_FRAME_SIZE
get_current_task tsk
@@ -341,6 +343,9 @@ alternative_else_nop_endif
msr cntkctl_el1, x1
4:
#endif
+ /* No kernel C function calls after this as user keys are set. */
+ ptrauth_keys_install_user tsk, x0, x1, x2
+
apply_ssbd 0, x0, x1
.endif
@@ -465,7 +470,7 @@ alternative_endif
.pushsection ".entry.text", "ax"
.align 11
-ENTRY(vectors)
+SYM_CODE_START(vectors)
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
@@ -492,7 +497,7 @@ ENTRY(vectors)
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
-END(vectors)
+SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
/*
@@ -534,57 +539,57 @@ __bad_stack:
ASM_BUG()
.endm
-el0_sync_invalid:
+SYM_CODE_START_LOCAL(el0_sync_invalid)
inv_entry 0, BAD_SYNC
-ENDPROC(el0_sync_invalid)
+SYM_CODE_END(el0_sync_invalid)
-el0_irq_invalid:
+SYM_CODE_START_LOCAL(el0_irq_invalid)
inv_entry 0, BAD_IRQ
-ENDPROC(el0_irq_invalid)
+SYM_CODE_END(el0_irq_invalid)
-el0_fiq_invalid:
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
inv_entry 0, BAD_FIQ
-ENDPROC(el0_fiq_invalid)
+SYM_CODE_END(el0_fiq_invalid)
-el0_error_invalid:
+SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
-ENDPROC(el0_error_invalid)
+SYM_CODE_END(el0_error_invalid)
#ifdef CONFIG_COMPAT
-el0_fiq_invalid_compat:
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
inv_entry 0, BAD_FIQ, 32
-ENDPROC(el0_fiq_invalid_compat)
+SYM_CODE_END(el0_fiq_invalid_compat)
#endif
-el1_sync_invalid:
+SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
-ENDPROC(el1_sync_invalid)
+SYM_CODE_END(el1_sync_invalid)
-el1_irq_invalid:
+SYM_CODE_START_LOCAL(el1_irq_invalid)
inv_entry 1, BAD_IRQ
-ENDPROC(el1_irq_invalid)
+SYM_CODE_END(el1_irq_invalid)
-el1_fiq_invalid:
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
inv_entry 1, BAD_FIQ
-ENDPROC(el1_fiq_invalid)
+SYM_CODE_END(el1_fiq_invalid)
-el1_error_invalid:
+SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
-ENDPROC(el1_error_invalid)
+SYM_CODE_END(el1_error_invalid)
/*
* EL1 mode handlers.
*/
.align 6
-el1_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
kernel_entry 1
mov x0, sp
bl el1_sync_handler
kernel_exit 1
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
.align 6
-el1_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
@@ -639,42 +644,42 @@ alternative_else_nop_endif
#endif
kernel_exit 1
-ENDPROC(el1_irq)
+SYM_CODE_END(el1_irq)
/*
* EL0 mode handlers.
*/
.align 6
-el0_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
kernel_entry 0
mov x0, sp
bl el0_sync_handler
b ret_to_user
-ENDPROC(el0_sync)
+SYM_CODE_END(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
-el0_sync_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
-ENDPROC(el0_sync_compat)
+SYM_CODE_END(el0_sync_compat)
.align 6
-el0_irq_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
b el0_irq_naked
-ENDPROC(el0_irq_compat)
+SYM_CODE_END(el0_irq_compat)
-el0_error_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
-ENDPROC(el0_error_compat)
+SYM_CODE_END(el0_error_compat)
#endif
.align 6
-el0_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +701,9 @@ el0_irq_naked:
bl trace_hardirqs_on
#endif
b ret_to_user
-ENDPROC(el0_irq)
+SYM_CODE_END(el0_irq)
-el1_error:
+SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
gic_prio_kentry_setup tmp=x2
@@ -706,9 +711,9 @@ el1_error:
mov x0, sp
bl do_serror
kernel_exit 1
-ENDPROC(el1_error)
+SYM_CODE_END(el1_error)
-el0_error:
+SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
@@ -720,7 +725,7 @@ el0_error_naked:
bl do_serror
enable_da_f
b ret_to_user
-ENDPROC(el0_error)
+SYM_CODE_END(el0_error)
/*
* Ok, we need to do extra processing, enter the slow path.
@@ -832,7 +837,7 @@ alternative_else_nop_endif
.endm
.align 11
-ENTRY(tramp_vectors)
+SYM_CODE_START_NOALIGN(tramp_vectors)
.space 0x400
tramp_ventry
@@ -844,24 +849,24 @@ ENTRY(tramp_vectors)
tramp_ventry 32
tramp_ventry 32
tramp_ventry 32
-END(tramp_vectors)
+SYM_CODE_END(tramp_vectors)
-ENTRY(tramp_exit_native)
+SYM_CODE_START(tramp_exit_native)
tramp_exit
-END(tramp_exit_native)
+SYM_CODE_END(tramp_exit_native)
-ENTRY(tramp_exit_compat)
+SYM_CODE_START(tramp_exit_compat)
tramp_exit 32
-END(tramp_exit_compat)
+SYM_CODE_END(tramp_exit_compat)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
.align PAGE_SHIFT
- .globl __entry_tramp_data_start
-__entry_tramp_data_start:
+SYM_DATA_START(__entry_tramp_data_start)
.quad vectors
+SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -874,7 +879,7 @@ __entry_tramp_data_start:
* Previous and next are guaranteed not to be the same.
*
*/
-ENTRY(cpu_switch_to)
+SYM_FUNC_START(cpu_switch_to)
mov x10, #THREAD_CPU_CONTEXT
add x8, x0, x10
mov x9, sp
@@ -895,21 +900,22 @@ ENTRY(cpu_switch_to)
ldr lr, [x8]
mov sp, x9
msr sp_el0, x1
+ ptrauth_keys_install_kernel x1, 1, x8, x9, x10
ret
-ENDPROC(cpu_switch_to)
+SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
/*
* This is how we return from a fork.
*/
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
blr x19
1: get_current_task tsk
b ret_to_user
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE
@@ -938,7 +944,7 @@ NOKPROBE(ret_from_fork)
*/
.ltorg
.pushsection ".entry.tramp.text", "ax"
-ENTRY(__sdei_asm_entry_trampoline)
+SYM_CODE_START(__sdei_asm_entry_trampoline)
mrs x4, ttbr1_el1
tbz x4, #USER_ASID_BIT, 1f
@@ -960,7 +966,7 @@ ENTRY(__sdei_asm_entry_trampoline)
ldr x4, =__sdei_asm_handler
#endif
br x4
-ENDPROC(__sdei_asm_entry_trampoline)
+SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
@@ -970,21 +976,22 @@ NOKPROBE(__sdei_asm_entry_trampoline)
* x2: exit_mode
* x4: struct sdei_registered_event argument from registration time.
*/
-ENTRY(__sdei_asm_exit_trampoline)
+SYM_CODE_START(__sdei_asm_exit_trampoline)
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
cbnz x4, 1f
tramp_unmap_kernel tmp=x4
1: sdei_handler_exit exit_mode=x2
-ENDPROC(__sdei_asm_exit_trampoline)
+SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
-__sdei_asm_trampoline_next_handler:
+SYM_DATA_START(__sdei_asm_trampoline_next_handler)
.quad __sdei_asm_handler
+SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -1002,7 +1009,7 @@ __sdei_asm_trampoline_next_handler:
* follow SMC-CC. We save (or retrieve) all the registers as the handler may
* want them.
*/
-ENTRY(__sdei_asm_handler)
+SYM_CODE_START(__sdei_asm_handler)
stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
@@ -1085,6 +1092,6 @@ alternative_else_nop_endif
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
br x5
#endif
-ENDPROC(__sdei_asm_handler)
+SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 989b1944cb71..57a91032b4c2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -105,7 +105,7 @@ pe_header:
* x24 __primary_switch() .. relocate_kernel()
* current RELR displacement
*/
-ENTRY(stext)
+SYM_CODE_START(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
adrp x23, __PHYS_OFFSET
@@ -118,14 +118,15 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
+ mov x0, #ARM64_CPU_BOOT_PRIMARY
bl __cpu_setup // initialise processor
b __primary_switch
-ENDPROC(stext)
+SYM_CODE_END(stext)
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
*/
-preserve_boot_args:
+SYM_CODE_START_LOCAL(preserve_boot_args)
mov x21, x0 // x21=FDT
adr_l x0, boot_args // record the contents of
@@ -137,7 +138,7 @@ preserve_boot_args:
mov x1, #0x20 // 4 x 8 bytes
b __inval_dcache_area // tail call
-ENDPROC(preserve_boot_args)
+SYM_CODE_END(preserve_boot_args)
/*
* Macro to create a table entry to the next page.
@@ -275,7 +276,7 @@ ENDPROC(preserve_boot_args)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled
*/
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
mov x28, lr
/*
@@ -403,15 +404,14 @@ __create_page_tables:
bl __inval_dcache_area
ret x28
-ENDPROC(__create_page_tables)
- .ltorg
+SYM_FUNC_END(__create_page_tables)
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
adr_l x5, init_task
@@ -456,7 +456,14 @@ __primary_switched:
mov x29, #0
mov x30, #0
b start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
+
+ .pushsection ".rodata", "a"
+SYM_DATA_START(kimage_vaddr)
+ .quad _text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
+EXPORT_SYMBOL(kimage_vaddr)
+ .popsection
/*
* end early head section, begin head code that is also used for
@@ -464,10 +471,6 @@ ENDPROC(__primary_switched)
*/
.section ".idmap.text","awx"
-ENTRY(kimage_vaddr)
- .quad _text - TEXT_OFFSET
-EXPORT_SYMBOL(kimage_vaddr)
-
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
@@ -475,7 +478,7 @@ EXPORT_SYMBOL(kimage_vaddr)
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively.
*/
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -599,7 +602,7 @@ set_hcr:
isb
ret
-install_el2_stub:
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
/*
* When VHE is not in use, early init of EL2 and EL1 needs to be
* done here.
@@ -636,13 +639,13 @@ install_el2_stub:
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
/*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
* in w0. See arch/arm64/include/asm/virt.h for more info.
*/
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
adr_l x1, __boot_cpu_mode
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -651,7 +654,7 @@ set_cpu_boot_mode_flag:
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
/*
* These values are written with the MMU off, but read with the MMU on.
@@ -667,15 +670,17 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
-ENTRY(__boot_cpu_mode)
+SYM_DATA_START(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
.long BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
*/
-ENTRY(__early_cpu_boot_status)
+SYM_DATA_START(__early_cpu_boot_status)
.quad 0
+SYM_DATA_END(__early_cpu_boot_status)
.popsection
@@ -683,7 +688,7 @@ ENTRY(__early_cpu_boot_status)
* This provides a "holding pen" for platforms to hold all secondary
* cores are held until we're ready for them to initialise.
*/
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
@@ -695,31 +700,32 @@ pen: ldr x4, [x3]
b.eq secondary_startup
wfe
b pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
/*
* Secondary entry point that jumps straight into the kernel. Only to
* be used where CPUs are brought online dynamically by the kernel.
*/
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
bl el2_setup // Drop to EL1
bl set_cpu_boot_mode_flag
b secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
bl __cpu_secondary_check52bitva
+ mov x0, #ARM64_CPU_BOOT_SECONDARY
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
adr_l x5, vectors
msr vbar_el1, x5
isb
@@ -734,13 +740,13 @@ __secondary_switched:
mov x29, #0
mov x30, #0
b secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
wfe
wfi
b __secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -772,7 +778,7 @@ ENDPROC(__secondary_too_slow)
* Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU
*/
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -796,9 +802,9 @@ ENTRY(__enable_mmu)
dsb nsh
isb
ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
ldr_l x0, vabits_actual
cmp x0, #52
@@ -816,9 +822,9 @@ ENTRY(__cpu_secondary_check52bitva)
#endif
2: ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
/* Indicate that this CPU can't boot and is stuck in the kernel */
update_early_cpu_boot_status \
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -826,10 +832,10 @@ __no_granule_support:
wfe
wfi
b 1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
#ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
/*
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
@@ -931,10 +937,10 @@ __relocate_kernel:
#endif
ret
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
#endif
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
mov x19, x0 // preserve new SCTLR_EL1 value
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
@@ -977,4 +983,4 @@ __primary_switch:
ldr x8, =__primary_switched
adrp x0, __PHYS_OFFSET
br x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 38bcd4d4e43b..6532105b3e32 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -110,8 +110,6 @@ ENTRY(swsusp_arch_suspend_exit)
cbz x24, 3f /* Do we need to re-initialise EL2? */
hvc #0
3: ret
-
- .ltorg
ENDPROC(swsusp_arch_suspend_exit)
/*
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 590963c9c609..5b73e92c99e3 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -166,14 +166,11 @@ int arch_hibernation_header_restore(void *addr)
sleep_cpu = -EINVAL;
return -EINVAL;
}
- if (!cpu_online(sleep_cpu)) {
- pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
- ret = cpu_up(sleep_cpu);
- if (ret) {
- pr_err("Failed to bring hibernate-CPU up!\n");
- sleep_cpu = -EINVAL;
- return ret;
- }
+
+ ret = bringup_hibernate_cpu(sleep_cpu);
+ if (ret) {
+ sleep_cpu = -EINVAL;
+ return ret;
}
resume_hdr = *hdr;
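
Note: bringup_hibernate_cpu() is a generic helper that absorbs the open-coded "bring the hibernate CPU back online" logic removed above. Its expected behaviour, reconstructed from the code it replaces (a sketch; the actual helper lives in kernel/cpu.c and may differ in messages or details):

int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}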
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 73d46070b315..e473ead806ed 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -63,7 +63,7 @@ el1_sync:
beq 9f // Nothing to reset!
/* Someone called kvm_call_hyp() against the hyp-stub... */
- ldr x0, =HVC_STUB_ERR
+ mov_q x0, HVC_STUB_ERR
eret
9: mov x0, xzr
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 25a2a9b479c2..7f06ad93fc95 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -12,7 +12,9 @@
#ifdef CONFIG_EFI
-__efistub_stext_offset = stext - _text;
+__efistub_kernel_size = _edata - _text;
+__efistub_stext_offset = stext - _text;
+
/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
@@ -33,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
#ifdef CONFIG_KASAN
__efistub___memcpy = __pi_memcpy;
@@ -45,6 +47,7 @@ __efistub__text = _text;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub_screen_info = screen_info;
+__efistub__ctype = _ctype;
#endif
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index dd3ae8081b38..b40c3b0def92 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -121,7 +121,7 @@ static int setup_dtb(struct kimage *image,
/* add kaslr-seed */
ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
- if (ret == -FDT_ERR_NOTFOUND)
+ if (ret == -FDT_ERR_NOTFOUND)
ret = 0;
else if (ret)
goto out;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e40b65645c86..4d7879484cec 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -285,6 +285,17 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+/*
+ * We unconditionally enable ARMv8.5-PMU long event counter support
+ * (64-bit events) where supported. Indicate if this arm_pmu has long
+ * event counter support.
+ */
+static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
+{
+ return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+}
+
/*
* We must chain two programmable counters for 64 bit events,
* except when we have allocated the 64bit cycle counter (for CPU
@@ -294,9 +305,11 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
return !WARN_ON(idx < 0) &&
armv8pmu_event_is_64bit(event) &&
+ !armv8pmu_has_long_event(cpu_pmu) &&
(idx != ARMV8_IDX_CYCLE_COUNTER);
}
@@ -345,7 +358,7 @@ static inline void armv8pmu_select_counter(int idx)
isb();
}
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
{
armv8pmu_select_counter(idx);
return read_sysreg(pmxevcntr_el0);
@@ -362,6 +375,44 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
return val;
}
+/*
+ * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
+ * is set the event counters also become 64-bit counters. Unless the
+ * user has requested a long counter (attr.config1) then we want to
+ * interrupt upon 32-bit overflow - we achieve this by applying a bias.
+ */
+static bool armv8pmu_event_needs_bias(struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (armv8pmu_event_is_64bit(event))
+ return false;
+
+ if (armv8pmu_has_long_event(cpu_pmu) ||
+ idx == ARMV8_IDX_CYCLE_COUNTER)
+ return true;
+
+ return false;
+}
+
+static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
+{
+ if (armv8pmu_event_needs_bias(event))
+ value |= GENMASK(63, 32);
+
+ return value;
+}
+
+static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
+{
+ if (armv8pmu_event_needs_bias(event))
+ value &= ~GENMASK(63, 32);
+
+ return value;
+}
+
static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -377,10 +428,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
else
value = armv8pmu_read_hw_counter(event);
- return value;
+ return armv8pmu_unbias_long_counter(event, value);
}
-static inline void armv8pmu_write_evcntr(int idx, u32 value)
+static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
armv8pmu_select_counter(idx);
write_sysreg(value, pmxevcntr_el0);
@@ -405,20 +456,14 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ value = armv8pmu_bias_long_counter(event, value);
+
if (!armv8pmu_counter_valid(cpu_pmu, idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
- /*
- * The cycles counter is really a 64-bit counter.
- * When treating it as a 32-bit counter, we only count
- * the lower 32 bits, and set the upper 32-bits so that
- * we get an interrupt upon 32-bit overflow.
- */
- if (!armv8pmu_event_is_64bit(event))
- value |= 0xffffffff00000000ULL;
+ else if (idx == ARMV8_IDX_CYCLE_COUNTER)
write_sysreg(value, pmccntr_el0);
- } else
+ else
armv8pmu_write_hw_counter(event, value);
}
@@ -450,86 +495,74 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
}
}
-static inline int armv8pmu_enable_counter(int idx)
+static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(BIT(counter), pmcntenset_el0);
- return idx;
+ int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ u32 mask = BIT(counter);
+
+ if (armv8pmu_event_is_chained(event))
+ mask |= BIT(counter - 1);
+ return mask;
+}
+
+static inline void armv8pmu_enable_counter(u32 mask)
+{
+ write_sysreg(mask, pmcntenset_el0);
}
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- int idx = event->hw.idx;
- u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
+ u32 mask = armv8pmu_event_cnten_mask(event);
- if (armv8pmu_event_is_chained(event))
- counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
-
- kvm_set_pmu_events(counter_bits, attr);
+ kvm_set_pmu_events(mask, attr);
/* We rely on the hypervisor switch code to enable guest counters */
- if (!kvm_pmu_counter_deferred(attr)) {
- armv8pmu_enable_counter(idx);
- if (armv8pmu_event_is_chained(event))
- armv8pmu_enable_counter(idx - 1);
- }
+ if (!kvm_pmu_counter_deferred(attr))
+ armv8pmu_enable_counter(mask);
}
-static inline int armv8pmu_disable_counter(int idx)
+static inline void armv8pmu_disable_counter(u32 mask)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(BIT(counter), pmcntenclr_el0);
- return idx;
+ write_sysreg(mask, pmcntenclr_el0);
}
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
- struct hw_perf_event *hwc = &event->hw;
struct perf_event_attr *attr = &event->attr;
- int idx = hwc->idx;
- u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
-
- if (armv8pmu_event_is_chained(event))
- counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+ u32 mask = armv8pmu_event_cnten_mask(event);
- kvm_clr_pmu_events(counter_bits);
+ kvm_clr_pmu_events(mask);
/* We rely on the hypervisor switch code to disable guest counters */
- if (!kvm_pmu_counter_deferred(attr)) {
- if (armv8pmu_event_is_chained(event))
- armv8pmu_disable_counter(idx - 1);
- armv8pmu_disable_counter(idx);
- }
+ if (!kvm_pmu_counter_deferred(attr))
+ armv8pmu_disable_counter(mask);
}
-static inline int armv8pmu_enable_intens(int idx)
+static inline void armv8pmu_enable_intens(u32 mask)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(BIT(counter), pmintenset_el1);
- return idx;
+ write_sysreg(mask, pmintenset_el1);
}
-static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
- return armv8pmu_enable_intens(event->hw.idx);
+ u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ armv8pmu_enable_intens(BIT(counter));
}
-static inline int armv8pmu_disable_intens(int idx)
+static inline void armv8pmu_disable_intens(u32 mask)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(BIT(counter), pmintenclr_el1);
+ write_sysreg(mask, pmintenclr_el1);
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- write_sysreg(BIT(counter), pmovsclr_el0);
+ write_sysreg(mask, pmovsclr_el0);
isb();
-
- return idx;
}
-static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
- return armv8pmu_disable_intens(event->hw.idx);
+ u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ armv8pmu_disable_intens(BIT(counter));
}
static inline u32 armv8pmu_getreset_flags(void)
@@ -743,7 +776,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
/*
* Otherwise use events counters
*/
- if (armv8pmu_event_is_64bit(event))
+ if (armv8pmu_event_is_64bit(event) &&
+ !armv8pmu_has_long_event(cpu_pmu))
return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
else
return armv8pmu_get_single_idx(cpuc, cpu_pmu);
@@ -815,13 +849,11 @@ static int armv8pmu_filter_match(struct perf_event *event)
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
+ u32 pmcr;
/* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv8pmu_disable_counter(idx);
- armv8pmu_disable_intens(idx);
- }
+ armv8pmu_disable_counter(U32_MAX);
+ armv8pmu_disable_intens(U32_MAX);
/* Clear the counters we flip at guest entry/exit */
kvm_clr_pmu_events(U32_MAX);
@@ -830,8 +862,13 @@ static void armv8pmu_reset(void *info)
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
*/
- armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
- ARMV8_PMU_PMCR_LC);
+ pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
+
+ /* Enable long event counter support where available */
+ if (armv8pmu_has_long_event(cpu_pmu))
+ pmcr |= ARMV8_PMU_PMCR_LP;
+
+ armv8pmu_pmcr_write(pmcr);
}
static int __armv8_pmuv3_map_event(struct perf_event *event,
@@ -914,6 +951,7 @@ static void __armv8pmu_probe_pmu(void *info)
if (pmuver == 0xf || pmuver == 0)
return;
+ cpu_pmu->pmuver = pmuver;
probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
@@ -953,7 +991,10 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
return probe.present ? 0 : -ENODEV;
}
-static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
+ int (*map_event)(struct perf_event *event),
+ const struct attribute_group *events,
+ const struct attribute_group *format)
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
@@ -972,144 +1013,127 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match;
+ cpu_pmu->name = name;
+ cpu_pmu->map_event = map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
+ events : &armv8_pmuv3_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
+ format : &armv8_pmuv3_format_attr_group;
+
return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_pmuv3";
- cpu_pmu->map_event = armv8_pmuv3_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+ return armv8_pmu_init(cpu_pmu, "armv8_pmuv3",
+ armv8_pmuv3_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34",
+ armv8_pmuv3_map_event, NULL, NULL);
}
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_cortex_a35";
- cpu_pmu->map_event = armv8_a53_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
-
- return 0;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35",
+ armv8_a53_map_event, NULL, NULL);
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_cortex_a53";
- cpu_pmu->map_event = armv8_a53_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53",
+ armv8_a53_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55",
+ armv8_pmuv3_map_event, NULL, NULL);
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_cortex_a57";
- cpu_pmu->map_event = armv8_a57_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57",
+ armv8_a57_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65",
+ armv8_pmuv3_map_event, NULL, NULL);
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_cortex_a72";
- cpu_pmu->map_event = armv8_a57_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
-
- return 0;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72",
+ armv8_a57_map_event, NULL, NULL);
}
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->name = "armv8_cortex_a73";
- cpu_pmu->map_event = armv8_a73_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73",
+ armv8_a73_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75",
+ armv8_pmuv3_map_event, NULL, NULL);
}
-static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76",
+ armv8_pmuv3_map_event, NULL, NULL);
+}
- cpu_pmu->name = "armv8_cavium_thunder";
- cpu_pmu->map_event = armv8_thunder_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77",
+ armv8_pmuv3_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1",
+ armv8_pmuv3_map_event, NULL, NULL);
}
-static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
{
- int ret = armv8_pmu_init(cpu_pmu);
- if (ret)
- return ret;
+ return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1",
+ armv8_pmuv3_map_event, NULL, NULL);
+}
- cpu_pmu->name = "armv8_brcm_vulcan";
- cpu_pmu->map_event = armv8_vulcan_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv8_pmuv3_format_attr_group;
+static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder",
+ armv8_thunder_map_event, NULL, NULL);
+}
- return 0;
+static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan",
+ armv8_vulcan_map_event, NULL, NULL);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
+ {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init},
{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init},
{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+ {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init},
{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+ {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
+ {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
+ {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
+ {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
+ {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
{},
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index c507b584259d..1e77736a4f66 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -9,7 +9,7 @@
int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
{
- struct ptrauth_keys *keys = &tsk->thread.keys_user;
+ struct ptrauth_keys_user *keys = &tsk->thread.keys_user;
unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
PR_PAC_APDAKEY | PR_PAC_APDBKEY;
unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
@@ -18,8 +18,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
return -EINVAL;
if (!arg) {
- ptrauth_keys_init(keys);
- ptrauth_keys_switch(keys);
+ ptrauth_keys_init_user(keys);
return 0;
}
@@ -41,7 +40,5 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
if (arg & PR_PAC_APGAKEY)
get_random_bytes(&keys->apga, sizeof(keys->apga));
- ptrauth_keys_switch(keys);
-
return 0;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 00626057a384..56be4cbf771f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -141,11 +141,11 @@ void arch_cpu_idle_dead(void)
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
* kexec'd kernel to use any and all RAM as it sees fit, without having to
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
+ * functionality embodied in smp_shutdown_nonboot_cpus() to achieve this.
*/
void machine_shutdown(void)
{
- disable_nonboot_cpus();
+ smp_shutdown_nonboot_cpus(reboot_cpu);
}
/*
@@ -262,7 +262,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)lr);
+ printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
@@ -376,6 +376,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
*/
fpsimd_flush_task_state(p);
+ ptrauth_thread_init_kernel(p);
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -512,7 +514,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
uao_thread_switch(next);
- ptrauth_thread_switch(next);
ssbs_thread_switch(next);
/*
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index cd6e5fa48b9c..b3d3005d9515 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -999,7 +999,7 @@ static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
}
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
- const struct ptrauth_keys *keys)
+ const struct ptrauth_keys_user *keys)
{
ukeys->apiakey = pac_key_to_user(&keys->apia);
ukeys->apibkey = pac_key_to_user(&keys->apib);
@@ -1007,7 +1007,7 @@ static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}
-static void pac_address_keys_from_user(struct ptrauth_keys *keys,
+static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
const struct user_pac_address_keys *ukeys)
{
keys->apia = pac_key_from_user(ukeys->apiakey);
@@ -1021,7 +1021,7 @@ static int pac_address_keys_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
struct user_pac_address_keys user_keys;
if (!system_supports_address_auth())
@@ -1038,7 +1038,7 @@ static int pac_address_keys_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
struct user_pac_address_keys user_keys;
int ret;
@@ -1056,12 +1056,12 @@ static int pac_address_keys_set(struct task_struct *target,
}
static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
- const struct ptrauth_keys *keys)
+ const struct ptrauth_keys_user *keys)
{
ukeys->apgakey = pac_key_to_user(&keys->apga);
}
-static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
+static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
const struct user_pac_generic_keys *ukeys)
{
keys->apga = pac_key_from_user(ukeys->apgakey);
@@ -1072,7 +1072,7 @@ static int pac_generic_keys_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
struct user_pac_generic_keys user_keys;
if (!system_supports_generic_auth())
@@ -1089,7 +1089,7 @@ static int pac_generic_keys_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
struct user_pac_generic_keys user_keys;
int ret;
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index c1d7db71a726..c40ce496c78b 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -41,7 +41,7 @@ ENTRY(arm64_relocate_new_kernel)
cmp x0, #CurrentEL_EL2
b.ne 1f
mrs x0, sctlr_el2
- ldr x1, =SCTLR_ELx_FLAGS
+ mov_q x1, SCTLR_ELx_FLAGS
bic x0, x0, x1
pre_disable_mmu_workaround
msr sctlr_el2, x0
@@ -113,8 +113,6 @@ ENTRY(arm64_relocate_new_kernel)
ENDPROC(arm64_relocate_new_kernel)
-.ltorg
-
.align 3 /* To keep the 64-bit values below naturally aligned. */
.Lcopy_end:
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index a34890bf309f..3fd2c11c09fc 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -344,7 +344,7 @@ void __init setup_arch(char **cmdline_p)
else
psci_acpi_init();
- cpu_read_bootcpu_ops();
+ init_bootcpu_ops();
smp_init_cpus();
smp_build_mpidr_hash();
@@ -371,8 +371,10 @@ void __init setup_arch(char **cmdline_p)
static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
- return cpu_ops[cpu]->cpu_can_disable(cpu);
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_can_disable)
+ return ops->cpu_can_disable(cpu);
#endif
return false;
}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f5b04dd8a710..7b2f2e650c44 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/smp.h>
.text
/*
@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
+ mov x0, #ARM64_CPU_RUNTIME
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5407bf5d98ac..061f60fe452f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -93,8 +93,10 @@ static inline int op_cpu_kill(unsigned int cpu)
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- if (cpu_ops[cpu]->cpu_boot)
- return cpu_ops[cpu]->cpu_boot(cpu);
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops->cpu_boot)
+ return ops->cpu_boot(cpu);
return -EOPNOTSUPP;
}
@@ -112,63 +114,66 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
*/
secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+ secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
+ secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
+#endif
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
- /*
- * Now bring the CPU into our world.
- */
+ /* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
- if (ret == 0) {
- /*
- * CPU was successfully started, wait for it to come online or
- * time out.
- */
- wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(5000));
-
- if (!cpu_online(cpu)) {
- pr_crit("CPU%u: failed to come online\n", cpu);
- ret = -EIO;
- }
- } else {
+ if (ret) {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
return ret;
}
+ /*
+ * CPU was successfully started, wait for it to come online or
+ * time out.
+ */
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(5000));
+ if (cpu_online(cpu))
+ return 0;
+
+ pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
secondary_data.stack = NULL;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+ secondary_data.ptrauth_key.apia.lo = 0;
+ secondary_data.ptrauth_key.apia.hi = 0;
+#endif
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
- if (ret && status) {
-
- if (status == CPU_MMU_OFF)
- status = READ_ONCE(__early_cpu_boot_status);
+ if (status == CPU_MMU_OFF)
+ status = READ_ONCE(__early_cpu_boot_status);
- switch (status & CPU_BOOT_STATUS_MASK) {
- default:
- pr_err("CPU%u: failed in unknown state : 0x%lx\n",
- cpu, status);
- cpus_stuck_in_kernel++;
- break;
- case CPU_KILL_ME:
- if (!op_cpu_kill(cpu)) {
- pr_crit("CPU%u: died during early boot\n", cpu);
- break;
- }
- pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
- case CPU_STUCK_IN_KERNEL:
- pr_crit("CPU%u: is stuck in kernel\n", cpu);
- if (status & CPU_STUCK_REASON_52_BIT_VA)
- pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
- if (status & CPU_STUCK_REASON_NO_GRAN)
- pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
- cpus_stuck_in_kernel++;
+ switch (status & CPU_BOOT_STATUS_MASK) {
+ default:
+ pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+ cpu, status);
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_KILL_ME:
+ if (!op_cpu_kill(cpu)) {
+ pr_crit("CPU%u: died during early boot\n", cpu);
break;
- case CPU_PANIC_KERNEL:
- panic("CPU%u detected unsupported configuration\n", cpu);
}
+ pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
+ case CPU_STUCK_IN_KERNEL:
+ pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN) {
+ pr_crit("CPU%u: does not support %luK granule\n",
+ cpu, PAGE_SIZE / SZ_1K);
+ }
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_PANIC_KERNEL:
+ panic("CPU%u detected unsupported configuration\n", cpu);
}
return ret;
@@ -196,6 +201,7 @@ asmlinkage notrace void secondary_start_kernel(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
+ const struct cpu_operations *ops;
unsigned int cpu;
cpu = task_cpu(current);
@@ -227,8 +233,9 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
check_local_cpu_capabilities();
- if (cpu_ops[cpu]->cpu_postboot)
- cpu_ops[cpu]->cpu_postboot();
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_postboot)
+ ops->cpu_postboot();
/*
* Log the CPU info before it is marked online and might get read.
@@ -266,19 +273,21 @@ asmlinkage notrace void secondary_start_kernel(void)
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we don't have a cpu_die method, abort before we reach the point
* of no return. CPU0 may not have a cpu_ops, so test for it.
*/
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ if (!ops || !ops->cpu_die)
return -EOPNOTSUPP;
/*
* We may need to abort a hot unplug for some other mechanism-specific
* reason.
*/
- if (cpu_ops[cpu]->cpu_disable)
- return cpu_ops[cpu]->cpu_disable(cpu);
+ if (ops->cpu_disable)
+ return ops->cpu_disable(cpu);
return 0;
}
@@ -314,15 +323,17 @@ int __cpu_disable(void)
static int op_cpu_kill(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we have no means of synchronising with the dying CPU, then assume
* that it is really dead. We can only wait for an arbitrary length of
* time and hope that it's dead, so let's skip the wait and just hope.
*/
- if (!cpu_ops[cpu]->cpu_kill)
+ if (!ops->cpu_kill)
return 0;
- return cpu_ops[cpu]->cpu_kill(cpu);
+ return ops->cpu_kill(cpu);
}
/*
@@ -357,6 +368,7 @@ void __cpu_die(unsigned int cpu)
void cpu_die(void)
{
unsigned int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
idle_task_exit();
@@ -370,12 +382,22 @@ void cpu_die(void)
* mechanism must perform all required cache maintenance to ensure that
* no dirty lines are lost in the process of shutting down the CPU.
*/
- cpu_ops[cpu]->cpu_die(cpu);
+ ops->cpu_die(cpu);
BUG();
}
#endif
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_die)
+ ops->cpu_die(cpu);
+#endif
+}
+
/*
* Kill the calling secondary CPU, early in bringup before it is turned
* online.
@@ -389,12 +411,11 @@ void cpu_die_early(void)
/* Mark this CPU absent */
set_cpu_present(cpu, 0);
-#ifdef CONFIG_HOTPLUG_CPU
- update_cpu_boot_status(CPU_KILL_ME);
- /* Check if we can park ourselves */
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
-#endif
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ update_cpu_boot_status(CPU_KILL_ME);
+ __cpu_try_die(cpu);
+ }
+
update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
cpu_park_loop();
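Switching from an #ifdef block to IS_ENABLED() keeps the hotplug path visible to the compiler on every configuration, so it is always type-checked, while constant folding still removes it when CONFIG_HOTPLUG_CPU is off. A simplified, self-contained sketch of the idiom (the IS_ENABLED() stand-in below is not the kernel's macro):

#include <stdio.h>

/* Simplified stand-in: the real IS_ENABLED() expands CONFIG_* symbols
 * generated by Kconfig; here it is just a compile-time constant. */
#define CONFIG_HOTPLUG_CPU_SKETCH 1
#define IS_ENABLED(opt) (opt)

static void cpu_try_die(int cpu)
{
	printf("CPU%d: parking via cpu_die()\n", cpu);
}

static void cpu_die_early_sketch(int cpu)
{
	/* Both branches are parsed and type-checked; the disabled one is
	 * eliminated as dead code because the condition is constant. */
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU_SKETCH))
		cpu_try_die(cpu);

	printf("CPU%d: stuck in kernel\n", cpu);
}

int main(void)
{
	cpu_die_early_sketch(1);
	return 0;
}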
@@ -488,10 +509,13 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
*/
static int __init smp_cpu_setup(int cpu)
{
- if (cpu_read_ops(cpu))
+ const struct cpu_operations *ops;
+
+ if (init_cpu_ops(cpu))
return -ENODEV;
- if (cpu_ops[cpu]->cpu_init(cpu))
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_init(cpu))
return -ENODEV;
set_cpu_possible(cpu, true);
@@ -714,6 +738,7 @@ void __init smp_init_cpus(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ const struct cpu_operations *ops;
int err;
unsigned int cpu;
unsigned int this_cpu;
@@ -744,10 +769,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!cpu_ops[cpu])
+ ops = get_cpu_ops(cpu);
+ if (!ops)
continue;
- err = cpu_ops[cpu]->cpu_prepare(cpu);
+ err = ops->cpu_prepare(cpu);
if (err)
continue;
@@ -863,10 +889,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
local_irq_disable();
sdei_mask_local_cpu();
-#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
-#endif
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ __cpu_try_die(cpu);
/* just in case */
cpu_park_loop();
@@ -1059,8 +1083,9 @@ static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(any_cpu);
- if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+ if (ops && ops->cpu_die)
return true;
#endif
return false;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index a336cb124320..139679c745bf 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -14,6 +14,7 @@
#include <linux/stacktrace.h>
#include <asm/irq.h>
+#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -86,7 +87,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
+ (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
struct ftrace_ret_stack *ret_stack;
/*
* This is a case where function graph tracer has
@@ -101,6 +102,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
/*
* Frames created upon entry from EL0 have NULL FP and PC values, so
* don't bother reporting these. Frames created by __noreturn functions
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fa9528dfd0ce..0801a0f3c156 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
+#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>
@@ -120,4 +121,183 @@ int __init parse_acpi_topology(void)
}
#endif
+#ifdef CONFIG_ARM64_AMU_EXTN
+#undef pr_fmt
+#define pr_fmt(fmt) "AMU: " fmt
+
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
+static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
+static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
+static cpumask_var_t amu_fie_cpus;
+
+/* Initialize counter reference per-cpu variables for the current CPU */
+void init_cpu_freq_invariance_counters(void)
+{
+ this_cpu_write(arch_core_cycles_prev,
+ read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
+ this_cpu_write(arch_const_cycles_prev,
+ read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
+}
+
+static int validate_cpu_freq_invariance_counters(int cpu)
+{
+ u64 max_freq_hz, ratio;
+
+ if (!cpu_has_amu_feat(cpu)) {
+ pr_debug("CPU%d: counters are not supported.\n", cpu);
+ return -EINVAL;
+ }
+
+ if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
+ !per_cpu(arch_core_cycles_prev, cpu))) {
+ pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
+ return -EINVAL;
+ }
+
+ /* Convert maximum frequency from KHz to Hz and validate */
+ max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
+ if (unlikely(!max_freq_hz)) {
+ pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
+ return -EINVAL;
+ }
+
+ /*
+ * Pre-compute the fixed ratio between the frequency of the constant
+ * counter and the maximum frequency of the CPU.
+ *
+ * const_freq
+ * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE²
+ * cpuinfo_max_freq
+ *
+ * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
+ * in order to ensure a good resolution for arch_max_freq_scale for
+ * very low arch timer frequencies (down to the KHz range which should
+ * be unlikely).
+ */
+ ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
+ ratio = div64_u64(ratio, max_freq_hz);
+ if (!ratio) {
+ WARN_ONCE(1, "System timer frequency too low.\n");
+ return -EINVAL;
+ }
+
+ per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
+
+ return 0;
+}
+
+static inline bool
+enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy) {
+ pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
+ return false;
+ }
+
+ if (cpumask_subset(policy->related_cpus, valid_cpus))
+ cpumask_or(amu_fie_cpus, policy->related_cpus,
+ amu_fie_cpus);
+
+ cpufreq_cpu_put(policy);
+
+ return true;
+}
+
+static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
+#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+
+static int __init init_amu_fie(void)
+{
+ cpumask_var_t valid_cpus;
+ bool have_policy = false;
+ int ret = 0;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto free_valid_mask;
+ }
+
+ for_each_present_cpu(cpu) {
+ if (validate_cpu_freq_invariance_counters(cpu))
+ continue;
+ cpumask_set_cpu(cpu, valid_cpus);
+ have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
+ }
+
+ /*
+ * If we are not restricted by cpufreq policies, we only enable
+ * the use of the AMU feature for FIE if all CPUs support AMU.
+ * Otherwise, enable_policy_freq_counters has already enabled
+ * policy cpus.
+ */
+ if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
+ cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);
+
+ if (!cpumask_empty(amu_fie_cpus)) {
+ pr_info("CPUs[%*pbl]: counters will be used for FIE.",
+ cpumask_pr_args(amu_fie_cpus));
+ static_branch_enable(&amu_fie_key);
+ }
+
+free_valid_mask:
+ free_cpumask_var(valid_cpus);
+
+ return ret;
+}
+late_initcall_sync(init_amu_fie);
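enable_policy_freq_counters() only folds a cpufreq policy's CPUs into amu_fie_cpus when every CPU the policy spans has validated counters; otherwise some CPUs of the policy would be scaled from AMU counters and others from cpufreq, giving inconsistent results. A small sketch of that subset check, using plain bitmasks in place of cpumasks (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Plain bitmasks stand in for cpumasks: bit n represents CPU n */
static uint32_t amu_fie_mask;

static void maybe_enable_policy(uint32_t policy_cpus, uint32_t valid_cpus)
{
	/* Add the policy's CPUs only if they are all valid, i.e.
	 * policy_cpus is a subset of valid_cpus. */
	if ((policy_cpus & ~valid_cpus) == 0)
		amu_fie_mask |= policy_cpus;
}

int main(void)
{
	uint32_t valid = 0x0f;            /* CPUs 0-3 passed validation */

	maybe_enable_policy(0x03, valid); /* CPUs 0-1: subset, added */
	maybe_enable_policy(0x30, valid); /* CPUs 4-5: not a subset, skipped */

	printf("amu_fie_mask = 0x%02x\n", amu_fie_mask); /* prints 0x03 */
	return 0;
}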
+
+bool arch_freq_counters_available(struct cpumask *cpus)
+{
+ return amu_freq_invariant() &&
+ cpumask_subset(cpus, amu_fie_cpus);
+}
+
+void topology_scale_freq_tick(void)
+{
+ u64 prev_core_cnt, prev_const_cnt;
+ u64 core_cnt, const_cnt, scale;
+ int cpu = smp_processor_id();
+
+ if (!amu_freq_invariant())
+ return;
+
+ if (!cpumask_test_cpu(cpu, amu_fie_cpus))
+ return;
+
+ const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
+ core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
+ prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
+ prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+
+ if (unlikely(core_cnt <= prev_core_cnt ||
+ const_cnt <= prev_const_cnt))
+ goto store_and_exit;
+
+ /*
+ * /\core arch_max_freq_scale
+ * scale = ------- * --------------------
+ * /\const SCHED_CAPACITY_SCALE
+ *
+ * See validate_cpu_freq_invariance_counters() for details on
+ * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
+ */
+ scale = core_cnt - prev_core_cnt;
+ scale *= this_cpu_read(arch_max_freq_scale);
+ scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
+ const_cnt - prev_const_cnt);
+
+ scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
+ this_cpu_write(freq_scale, (unsigned long)scale);
+
+store_and_exit:
+ this_cpu_write(arch_core_cycles_prev, core_cnt);
+ this_cpu_write(arch_const_cycles_prev, const_cnt);
+}
+#endif /* CONFIG_ARM64_AMU_EXTN */
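A worked numeric example of the two formulas in the comments above, as a stand-alone sketch with made-up rates (a 50 MHz constant/arch timer, a 2 GHz maximum CPU frequency, SCHED_CAPACITY_SHIFT of 10); the variable names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT  10
#define SCHED_CAPACITY_SCALE  (1ULL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* Assumed example rates */
	uint64_t const_rate_hz = 50000000ULL;   /* constant (arch timer) counter */
	uint64_t max_freq_hz   = 2000000000ULL; /* cpuinfo_max_freq in Hz */

	/* arch_max_freq_scale = const_freq / cpuinfo_max_freq * SCALE^2 */
	uint64_t max_freq_scale =
		(const_rate_hz << (2 * SCHED_CAPACITY_SHIFT)) / max_freq_hz;

	/* One tick window during which the CPU ran at ~1 GHz (half of max):
	 * the core counter advanced 1,000,000 cycles while the constant
	 * counter advanced 50,000. */
	uint64_t delta_core = 1000000, delta_const = 50000;

	uint64_t scale = ((delta_core * max_freq_scale)
			  >> SCHED_CAPACITY_SHIFT) / delta_const;
	if (scale > SCHED_CAPACITY_SCALE)
		scale = SCHED_CAPACITY_SCALE;

	printf("arch_max_freq_scale = %llu\n",
	       (unsigned long long)max_freq_scale);   /* 26214 */
	printf("freq_scale this tick = %llu (~SCALE/2)\n",
	       (unsigned long long)scale);             /* ~511 */
	return 0;
}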
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 354b11e27c07..033a48f30dbb 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -260,18 +260,7 @@ static int __aarch32_alloc_vdso_pages(void)
if (ret)
return ret;
- ret = aarch32_alloc_kuser_vdso_page();
- if (ret) {
- unsigned long c_vvar =
- (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
- unsigned long c_vdso =
- (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
-
- free_page(c_vvar);
- free_page(c_vdso);
- }
-
- return ret;
+ return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index f8b69d84238e..652e31d82582 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index dd2514bb1511..3862cad2410c 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
index 0723aa398d6e..12324863d5c2 100644
--- a/arch/arm64/kernel/vdso/sigreturn.S
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -14,7 +14,7 @@
.text
nop
-ENTRY(__kernel_rt_sigreturn)
+SYM_FUNC_START(__kernel_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
.cfi_def_cfa x29, 0
@@ -23,4 +23,4 @@ ENTRY(__kernel_rt_sigreturn)
mov x8, #__NR_rt_sigreturn
svc #0
.cfi_endproc
-ENDPROC(__kernel_rt_sigreturn)
+SYM_FUNC_END(__kernel_rt_sigreturn)
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
index 747635501a14..4236cf34d7d9 100644
--- a/arch/arm64/kernel/vdso/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -5,8 +5,6 @@
* Copyright (C) 2018 ARM Limited
*
*/
-#include <linux/time.h>
-#include <linux/types.h>
int __kernel_clock_gettime(clockid_t clock,
struct __kernel_timespec *ts)
diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore
index 4fea950fa5ed..3542fa24e26b 100644
--- a/arch/arm64/kernel/vdso32/.gitignore
+++ b/arch/arm64/kernel/vdso32/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
vdso.so.raw
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 04df57b43cb1..3964738ebbde 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -10,7 +10,18 @@ include $(srctree)/lib/vdso/Makefile
# Same as cc-*option, but using CC_COMPAT instead of CC
ifeq ($(CONFIG_CC_IS_CLANG), y)
+COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
+COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
+
+CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
+CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
+ifneq ($(COMPAT_GCC_TOOLCHAIN),)
+CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
+endif
+
CC_COMPAT ?= $(CC)
+CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
else
CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
endif
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
index 1a81277c2d09..620524969696 100644
--- a/arch/arm64/kernel/vdso32/sigreturn.S
+++ b/arch/arm64/kernel/vdso32/sigreturn.S
@@ -10,13 +10,6 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#define ARM_ENTRY(name) \
- ENTRY(name)
-
-#define ARM_ENDPROC(name) \
- .type name, %function; \
- END(name)
-
.text
.arm
@@ -24,39 +17,39 @@
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_arm)
+SYM_FUNC_START(__kernel_sigreturn_arm)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_arm)
+SYM_FUNC_END(__kernel_sigreturn_arm)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_arm)
+SYM_FUNC_START(__kernel_rt_sigreturn_arm)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_arm)
+SYM_FUNC_END(__kernel_rt_sigreturn_arm)
.thumb
.fnstart
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_thumb)
+SYM_FUNC_START(__kernel_sigreturn_thumb)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_thumb)
+SYM_FUNC_END(__kernel_sigreturn_thumb)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_START(__kernel_rt_sigreturn_thumb)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_END(__kernel_rt_sigreturn_thumb)
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
index 54fc1c2ce93f..5acff29c5991 100644
--- a/arch/arm64/kernel/vdso32/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -5,26 +5,16 @@
* Copyright (C) 2018 ARM Limited
*
*/
-#include <linux/time.h>
-#include <linux/types.h>
int __vdso_clock_gettime(clockid_t clock,
struct old_timespec32 *ts)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)ts >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_gettime32(clock, ts);
}
int __vdso_clock_gettime64(clockid_t clock,
struct __kernel_timespec *ts)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)ts >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_gettime(clock, ts);
}
@@ -37,10 +27,6 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
int __vdso_clock_getres(clockid_t clock_id,
struct old_timespec32 *res)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)res >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_getres_time32(clock_id, res);
}