path: root/arch/arm64/kernel/cpufeature.c
author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2019-07-10 23:24:10 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2019-07-10 23:24:10 -0700
commit     597473720f4dc69749542bfcfed4a927a43d935e (patch)
tree       711bf773910fb93d1dd9120c633adc807685e0d8 /arch/arm64/kernel/cpufeature.c
parent     Input: atmel_mxt_ts - fix leak in mxt_update_cfg() (diff)
parent     Input: gpio_keys_polled - allow specifying name of input device (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.3 merge window.
Diffstat (limited to 'arch/arm64/kernel/cpufeature.c')
-rw-r--r--  arch/arm64/kernel/cpufeature.c  363
1 file changed, 276 insertions(+), 87 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..4061de10cea6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -52,6 +52,10 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+
+/* We also need a bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
/*
* Flag to indicate if we have computed the system wide
@@ -141,9 +145,18 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
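
Each entry added above describes an unsigned, 4-bit, lower-safe field of
ID_AA64ISAR1_EL1. Reading such a field is a plain shift-and-mask; as a
minimal sketch, where id_field() is a hypothetical helper and not part of
this patch:

	/* Illustrative only: extract one 4-bit ID register field */
	static inline unsigned int id_field(u64 reg, unsigned int shift)
	{
		return (reg >> shift) & 0xf;
	}

	/* e.g. id_field(read_cpuid(ID_AA64ISAR1_EL1), ID_AA64ISAR1_SB_SHIFT) */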
@@ -518,6 +531,29 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
}
extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+ for (; caps->matches; caps++) {
+ if (WARN(caps->capability >= ARM64_NCAPS,
+ "Invalid capability %d\n", caps->capability))
+ continue;
+ if (WARN(cpu_hwcaps_ptrs[caps->capability],
+ "Duplicate entry for capability %d\n",
+ caps->capability))
+ continue;
+ cpu_hwcaps_ptrs[caps->capability] = caps;
+ }
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+ init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+ init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
static void __init setup_boot_cpu_capabilities(void);
void __init init_cpu_features(struct cpuinfo_arm64 *info)
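
The indirect list built above lets a capability number be mapped back to its
descriptor in O(1). A minimal user-space analogue of the same
register-then-look-up pattern, using invented toy types rather than the
kernel structures:

	#include <stdio.h>

	#define NCAPS 4

	struct cap { int id; const char *desc; };

	static const struct cap *cap_ptrs[NCAPS];

	static void register_caps(const struct cap *caps, int n)
	{
		for (int i = 0; i < n; i++) {
			if (caps[i].id >= NCAPS || cap_ptrs[caps[i].id])
				continue; /* invalid or duplicate: mirrors the WARN()s above */
			cap_ptrs[caps[i].id] = &caps[i];
		}
	}

	int main(void)
	{
		static const struct cap feats[] = { { 0, "feat A" }, { 2, "feat B" } };

		register_caps(feats, 2);
		if (cap_ptrs[2]) /* O(1) lookup by capability number */
			printf("%s\n", cap_ptrs[2]->desc);
		return 0;
	}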
@@ -564,6 +600,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
}
/*
+ * Initialize the indirect array of CPU hwcaps capabilities pointers
+ * before we handle the boot CPU below.
+ */
+ init_cpu_hwcaps_indirect_list();
+
+ /*
* Detect and enable early CPU capabilities based on the boot CPU,
* after we have initialised the CPU feature infrastructure.
*/
@@ -915,6 +957,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};
char const *str = "command line option";
@@ -938,7 +987,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/* Useful for KASLR robustness */
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return true;
+ return kaslr_offset() > 0;
/* Don't force KPTI for CPUs that are not vulnerable */
if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
@@ -958,7 +1007,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
static bool kpti_applied = false;
int cpu = smp_processor_id();
- if (kpti_applied)
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (kpti_applied || kaslr_offset() > 0)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1068,7 +1122,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
* do anything here.
*/
- if (!alternatives_applied)
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
#endif
@@ -1145,11 +1199,35 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_PTR_AUTH
+static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
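
sysreg_clear_set() is a read-modify-write helper; with an empty clear mask,
the call above amounts to the following hand expansion (the real helper also
skips the write when nothing changes):

	u64 v = read_sysreg(sctlr_el1);

	/* enable address authentication with the IA/IB instruction keys
	 * and the DA/DB data keys */
	v |= SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;
	write_sysreg(v, sctlr_el1);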
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+ return strtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
+}
+#endif
+
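
The new early parameter is parsed with strtobool(), so pseudo-NMI support
stays opt-in. Booting with, for example:

	irqchip.gicv3_pseudo_nmi=1

is what lets can_use_gic_priorities() report the ARM64_HAS_IRQ_PRIO_MASKING
capability added further down, provided the GICv3 CPU interface check also
passes.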
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
@@ -1368,22 +1446,130 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_cnp,
},
#endif
+ {
+ .desc = "Speculation barrier (SB)",
+ .capability = ARM64_HAS_SB,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_SB_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PTR_AUTH
+ {
+ .desc = "Address authentication (architected algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Address authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Generic authentication (architected algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "Generic authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+ .matches = has_cpuid_feature,
+ },
+#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ {
+ /*
+ * Depends on having GICv3
+ */
+ .desc = "IRQ priority masking",
+ .capability = ARM64_HAS_IRQ_PRIO_MASKING,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = can_use_gic_priorities,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#endif
{},
};
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
- { \
- .desc = #cap, \
- .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
- .matches = has_cpuid_feature, \
- .sys_reg = reg, \
- .field_pos = field, \
- .sign = s, \
- .min_field_value = min_value, \
- .hwcap_type = cap_type, \
- .hwcap = cap, \
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ .matches = has_cpuid_feature, \
+ .sys_reg = reg, \
+ .field_pos = field, \
+ .sign = s, \
+ .min_field_value = min_value,
+
+#define __HWCAP_CAP(name, cap_type, cap) \
+ .desc = name, \
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
+ .hwcap_type = cap_type, \
+ .hwcap = cap, \
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
+#define HWCAP_MULTI_CAP(list, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = cpucap_multi_entry_cap_matches, \
+ .match_list = list, \
+ }
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+ },
+ {},
+};
+
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+ },
+ {},
+};
+#endif
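
cpucap_multi_entry_cap_matches() (declared in cpufeature.h, not shown in
this hunk) ORs the entries of match_list together; its behaviour is along
these lines:

	/* Sketch: true if any entry of the NULL-terminated match_list matches */
	static bool
	cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
				       int scope)
	{
		const struct arm64_cpu_capabilities *caps;

		for (caps = entry->match_list; caps->matches; caps++)
			if (caps->matches(caps, scope))
				return true;

		return false;
	}

As a result, the HWCAP_PACA entry below is exposed when either the
architected (APA) or the IMP DEF (API) address-auth field is non-zero, and
similarly HWCAP_PACG for GPA/GPI.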
+
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
@@ -1409,11 +1595,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+#ifdef CONFIG_ARM64_PTR_AUTH
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
+#endif
{},
};
@@ -1482,52 +1673,49 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
- unsigned int cap)
+static void update_cpu_capabilities(u16 scope_mask)
{
+ int i;
const struct arm64_cpu_capabilities *caps;
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = cap_array; caps->matches; caps++)
- if (caps->capability == cap)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
-}
-
-static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask, const char *info)
-{
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask) ||
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask) ||
+ cpus_have_cap(caps->capability) ||
!caps->matches(caps, cpucap_default_scope(caps)))
continue;
- if (!cpus_have_cap(caps->capability) && caps->desc)
- pr_info("%s %s\n", info, caps->desc);
+ if (caps->desc)
+ pr_info("detected: %s\n", caps->desc);
cpus_set_cap(caps->capability);
+
+ if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+ set_bit(caps->capability, boot_capabilities);
}
}
-static void update_cpu_capabilities(u16 scope_mask)
+/*
+ * Enable all the available capabilities on this CPU. The capabilities
+ * with BOOT_CPU scope are handled separately and hence skipped here.
+ */
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
{
- __update_cpu_capabilities(arm64_errata, scope_mask,
- "enabling workaround for");
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
-}
+ int i;
+ u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
-static int __enable_cpu_capability(void *arg)
-{
- const struct arm64_cpu_capabilities *cap = arg;
+ for_each_available_cap(i) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
+
+ if (WARN_ON(!cap))
+ continue;
+
+ if (!(cap->type & non_boot_scope))
+ continue;
- cap->cpu_enable(cap);
+ if (cap->cpu_enable)
+ cap->cpu_enable(cap);
+ }
return 0;
}
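
for_each_available_cap() walks the bits already set in the cpu_hwcaps
bitmap, presumably along the lines of:

	#define for_each_available_cap(cap)			\
		for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)

so the stop_machine() callback above only revisits capabilities that
update_cpu_capabilities() actually detected.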
@@ -1535,21 +1723,29 @@ static int __enable_cpu_capability(void *arg)
* Run through the enabled capabilities and enable them on all active
* CPUs.
*/
-static void __init
-__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static void __init enable_cpu_capabilities(u16 scope_mask)
{
+ int i;
+ const struct arm64_cpu_capabilities *caps;
+ bool boot_scope;
+
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- unsigned int num = caps->capability;
+ boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
- if (!(caps->type & scope_mask) || !cpus_have_cap(num))
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ unsigned int num;
+
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
+ continue;
+ num = caps->capability;
+ if (!cpus_have_cap(num))
continue;
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
- if (caps->cpu_enable) {
+ if (boot_scope && caps->cpu_enable)
/*
* Capabilities with SCOPE_BOOT_CPU scope are finalised
* before any secondary CPU boots. Thus, each secondary
@@ -1558,25 +1754,19 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* the boot CPU, for which the capability must be
* enabled here. This approach avoids costly
* stop_machine() calls for this case.
- *
- * Otherwise, use stop_machine() as it schedules the
- * work allowing us to modify PSTATE, instead of
- * on_each_cpu() which uses an IPI, giving us a PSTATE
- * that disappears when we return.
*/
- if (scope_mask & SCOPE_BOOT_CPU)
- caps->cpu_enable(caps);
- else
- stop_machine(__enable_cpu_capability,
- (void *)caps, cpu_online_mask);
- }
+ caps->cpu_enable(caps);
}
-}
-static void __init enable_cpu_capabilities(u16 scope_mask)
-{
- __enable_cpu_capabilities(arm64_errata, scope_mask);
- __enable_cpu_capabilities(arm64_features, scope_mask);
+ /*
+ * For all non-boot scope capabilities, use stop_machine()
+ * as it schedules the work allowing us to modify PSTATE,
+ * instead of on_each_cpu() which uses an IPI, giving us a
+ * PSTATE that disappears when we return.
+ */
+ if (!boot_scope)
+ stop_machine(cpu_enable_non_boot_scope_capabilities,
+ NULL, cpu_online_mask);
}
/*
@@ -1586,16 +1776,17 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
*
* Returns "false" on conflicts.
*/
-static bool
-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static bool verify_local_cpu_caps(u16 scope_mask)
{
+ int i;
bool cpu_has_cap, system_has_cap;
+ const struct arm64_cpu_capabilities *caps;
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask))
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
continue;
cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
@@ -1626,7 +1817,7 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
}
}
- if (caps->matches) {
+ if (i < ARM64_NCAPS) {
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
smp_processor_id(), caps->capability,
caps->desc, system_has_cap, cpu_has_cap);
@@ -1636,12 +1827,6 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
return true;
}
-static bool verify_local_cpu_caps(u16 scope_mask)
-{
- return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
- __verify_local_cpu_caps(arm64_features, scope_mask);
-}
-
/*
* Check for CPU features that are used in early boot
* based on the Boot CPU value.
@@ -1750,12 +1935,16 @@ static void __init mark_const_caps_ready(void)
static_branch_enable(&arm64_const_caps_ready);
}
-extern const struct arm64_cpu_capabilities arm64_errata[];
-
-bool this_cpu_has_cap(unsigned int cap)
+bool this_cpu_has_cap(unsigned int n)
{
- return (__this_cpu_has_cap(arm64_features, cap) ||
- __this_cpu_has_cap(arm64_errata, cap));
+ if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_LOCAL_CPU);
+ }
+
+ return false;
}
static void __init setup_system_capabilities(void)
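
With the indirect table in place, this_cpu_has_cap() is a single indexed
lookup instead of two linear scans over arm64_features and arm64_errata. It
must still run with preemption disabled; an illustrative (not in-tree)
caller:

	preempt_disable();
	if (this_cpu_has_cap(ARM64_HAS_SB))
		pr_info("CPU%d implements SB\n", smp_processor_id());
	preempt_enable();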