author		Catalin Marinas <catalin.marinas@arm.com>	2022-09-30 09:18:22 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2022-09-30 09:18:22 +0100
commit		c704cf27a1adc5fa40fb8e40b5617bdca889a419 (patch)
tree		47d4dc5c6a938ca90e370dc45767ce3971d49616 /arch/arm64/kernel
parent		Merge branch 'for-next/kselftest' into for-next/core (diff)
parent		arm64: fix the build with binutils 2.27 (diff)
Merge branch 'for-next/alternatives' into for-next/core
* for-next/alternatives:
  : Alternatives (code patching) improvements
  arm64: fix the build with binutils 2.27
  arm64: avoid BUILD_BUG_ON() in alternative-macros
  arm64: alternatives: add shared NOP callback
  arm64: alternatives: add alternative_has_feature_*()
  arm64: alternatives: have callbacks take a cap
  arm64: alternatives: make alt_region const
  arm64: alternatives: hoist print out of __apply_alternatives()
  arm64: alternatives: proton-pack: prepare for cap changes
  arm64: alternatives: kvm: prepare for cap changes
  arm64: cpufeature: make cpus_have_cap() noinstr-safe
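
For orientation, a minimal sketch of the new alt_instr::cpufeature
encoding this series introduces (cf. the ALT_CAP()/ALT_HAS_CB() macros in
the alternative.c hunk below): callback alternatives now carry a real
capability number plus a flag bit, replacing the old ARM64_CB_PATCH
pseudo-capability. The ARM64_CB_BIT position used here is an assumption,
for illustration only.

	/* Assumed flag bit, placed above all valid cap numbers. */
	#define ARM64_CB_BIT	(1UL << 15)

	/* Capability to test with cpus_have_cap(), flag masked off. */
	static inline unsigned int alt_cap(unsigned int cpufeature)
	{
		return cpufeature & ~ARM64_CB_BIT;
	}

	/* True if the replacement field holds a patch callback rather
	 * than replacement instructions. */
	static inline bool alt_has_cb(unsigned int cpufeature)
	{
		return cpufeature & ARM64_CB_BIT;
	}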
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--	arch/arm64/kernel/alternative.c	70
-rw-r--r--	arch/arm64/kernel/cpufeature.c	44
-rw-r--r--	arch/arm64/kernel/entry.S	8
-rw-r--r--	arch/arm64/kernel/image-vars.h	5
-rw-r--r--	arch/arm64/kernel/proton-pack.c	2
5 files changed, 63 insertions(+), 66 deletions(-)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index a97775963f35..64045e3ef03a 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,6 +24,9 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+#define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT)
+#define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT)
+
/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;
@@ -136,8 +139,9 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
} while (cur += d_size, cur < end);
}
-static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
- unsigned long *feature_mask)
+static void __nocfi __apply_alternatives(const struct alt_region *region,
+ bool is_module,
+ unsigned long *feature_mask)
{
struct alt_instr *alt;
__le32 *origptr, *updptr;
@@ -145,30 +149,27 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
for (alt = region->begin; alt < region->end; alt++) {
int nr_inst;
+ int cap = ALT_CAP(alt);
- if (!test_bit(alt->cpufeature, feature_mask))
+ if (!test_bit(cap, feature_mask))
continue;
- /* Use ARM64_CB_PATCH as an unconditional patch */
- if (alt->cpufeature < ARM64_CB_PATCH &&
- !cpus_have_cap(alt->cpufeature))
+ if (!cpus_have_cap(cap))
continue;
- if (alt->cpufeature == ARM64_CB_PATCH)
+ if (ALT_HAS_CB(alt))
BUG_ON(alt->alt_len != 0);
else
BUG_ON(alt->alt_len != alt->orig_len);
- pr_info_once("patching kernel code\n");
-
origptr = ALT_ORIG_PTR(alt);
updptr = is_module ? origptr : lm_alias(origptr);
nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
- if (alt->cpufeature < ARM64_CB_PATCH)
- alt_cb = patch_alternative;
- else
+ if (ALT_HAS_CB(alt))
alt_cb = ALT_REPL_PTR(alt);
+ else
+ alt_cb = patch_alternative;
alt_cb(alt, origptr, updptr, nr_inst);
@@ -201,9 +202,9 @@ void apply_alternatives_vdso(void)
const struct elf64_hdr *hdr;
const struct elf64_shdr *shdr;
const struct elf64_shdr *alt;
- DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+ DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
- bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+ bitmap_fill(all_capabilities, ARM64_NCAPS);
hdr = (struct elf64_hdr *)vdso_start;
shdr = (void *)hdr + hdr->e_shoff;
@@ -219,30 +220,31 @@ void apply_alternatives_vdso(void)
__apply_alternatives(&region, false, &all_capabilities[0]);
}
+static const struct alt_region kernel_alternatives = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+};
+
/*
* We might be patching the stop_machine state machine, so implement a
* really simple polling protocol here.
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- struct alt_region region = {
- .begin = (struct alt_instr *)__alt_instructions,
- .end = (struct alt_instr *)__alt_instructions_end,
- };
-
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
while (!all_alternatives_applied)
cpu_relax();
isb();
} else {
- DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+ DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
bitmap_complement(remaining_capabilities, boot_capabilities,
- ARM64_NPATCHABLE);
+ ARM64_NCAPS);
BUG_ON(all_alternatives_applied);
- __apply_alternatives(&region, false, remaining_capabilities);
+ __apply_alternatives(&kernel_alternatives, false,
+ remaining_capabilities);
/* Barriers provided by the cache flushing */
all_alternatives_applied = 1;
}
@@ -252,6 +254,8 @@ static int __apply_alternatives_multi_stop(void *unused)
void __init apply_alternatives_all(void)
{
+ pr_info("applying system-wide alternatives\n");
+
apply_alternatives_vdso();
/* better not try code patching on a live SMP system */
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
@@ -264,15 +268,13 @@ void __init apply_alternatives_all(void)
*/
void __init apply_boot_alternatives(void)
{
- struct alt_region region = {
- .begin = (struct alt_instr *)__alt_instructions,
- .end = (struct alt_instr *)__alt_instructions_end,
- };
-
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
- __apply_alternatives(&region, false, &boot_capabilities[0]);
+ pr_info("applying boot alternatives\n");
+
+ __apply_alternatives(&kernel_alternatives, false,
+ &boot_capabilities[0]);
}
#ifdef CONFIG_MODULES
@@ -282,10 +284,18 @@ void apply_alternatives_module(void *start, size_t length)
.begin = start,
.end = start + length,
};
- DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+ DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
- bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+ bitmap_fill(all_capabilities, ARM64_NCAPS);
__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif
+
+noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ for (int i = 0; i < nr_inst; i++)
+ updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+}
+EXPORT_SYMBOL(alt_cb_patch_nops);
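
As an aside, the shared callback exported above reduces to writing the
A64 NOP encoding over the patch span; a freestanding sketch of that
effect (function name illustrative, and the real code goes through
aarch64_insn_gen_nop()/cpu_to_le32() as shown in the hunk):

	#include <stdint.h>

	#define AARCH64_NOP	0xd503201fu	/* A64 "nop" encoding */

	/* Overwrite every instruction slot in the span with a NOP, as
	 * alt_cb_patch_nops does for the kernel's alternative sites. */
	static void patch_nops_sketch(uint32_t *updptr, int nr_inst)
	{
		for (int i = 0; i < nr_inst; i++)
			updptr[i] = AARCH64_NOP;
	}

Any callback alternative whose replacement should simply disappear can
now point at this one helper instead of open-coding the same loop.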
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index bff9280e184f..5d0527ba0804 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
-/* Need also bit for ARM64_CB_PATCH */
-DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
@@ -134,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
*/
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier. This is also used to decide if we could use
- * the fast path for checking constant CPU caps.
- */
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-static inline void finalize_system_capabilities(void)
-{
- static_branch_enable(&arm64_const_caps_ready);
-}
-
void dump_cpu_features(void)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}
-DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
-EXPORT_SYMBOL(cpu_hwcap_keys);
-
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
{ \
.sign = SIGNED, \
@@ -1392,6 +1372,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
#include <linux/irqchip/arm-gic-v3.h>
static bool
+has_always(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ return true;
+}
+
+static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
@@ -2111,6 +2097,16 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
static const struct arm64_cpu_capabilities arm64_features[] = {
{
+ .capability = ARM64_ALWAYS_BOOT,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_always,
+ },
+ {
+ .capability = ARM64_ALWAYS_SYSTEM,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_always,
+ },
+ {
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
@@ -2953,9 +2949,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
if (!cpus_have_cap(num))
continue;
- /* Ensure cpus_have_const_cap(num) works */
- static_branch_enable(&cpu_hwcap_keys[num]);
-
if (boot_scope && caps->cpu_enable)
/*
* Capabilities with SCOPE_BOOT_CPU scope are finalised
@@ -3277,9 +3270,6 @@ void __init setup_cpu_features(void)
sme_setup();
minsigstksz_setup();
- /* Advertise that we have computed the system capabilities */
- finalize_system_capabilities();
-
/*
* Check for sane CTR_EL0.CWG value.
*/
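
Taken together with the removal of cpu_hwcap_keys and
arm64_const_caps_ready above, a cap check becomes a plain bitmap test. A
hedged sketch of what a noinstr-safe cpus_have_cap() looks like after
this series (arch_test_bit() as the non-instrumented bitop is an
assumption here):

	static __always_inline bool cpus_have_cap(unsigned int num)
	{
		if (num >= ARM64_NCAPS)
			return false;
		return arch_test_bit(num, cpu_hwcaps);
	}

With the ARM64_ALWAYS_BOOT/ARM64_ALWAYS_SYSTEM entries added above, such
a check is trivially true for callback alternatives gated on the new
always-on caps, which is what lets the entry.S sites below patch
unconditionally.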
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2d73b3e793b2..e28137d64b76 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -114,7 +114,7 @@
* them if required.
*/
.macro apply_ssbd, state, tmp1, tmp2
-alternative_cb spectre_v4_patch_fw_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
b .L__asm_ssbd_skip\@ // Patched to NOP
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
@@ -123,7 +123,7 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb smccc_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
@@ -175,7 +175,7 @@ alternative_else_nop_endif
.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb kasan_hw_tags_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
alternative_cb_end
mov \tmp, KERNEL_GCR_EL1
@@ -186,7 +186,7 @@ alternative_cb_end
.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb kasan_hw_tags_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
alternative_cb_end
ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index afa69e04e75e..4aaa5f3d1f65 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -73,6 +73,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
+KVM_NVHE_ALIAS(alt_cb_patch_nops);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -89,10 +90,6 @@ KVM_NVHE_ALIAS(__icache_flags);
/* VMID bits set by the KVM VMID allocator */
KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
-/* Kernel symbols needed for cpus_have_final/const_caps checks. */
-KVM_NVHE_ALIAS(arm64_const_caps_ready);
-KVM_NVHE_ALIAS(cpu_hwcap_keys);
-
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
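
For context, the KVM_NVHE_ALIAS() line added above follows the usual hyp
image aliasing pattern; a rough sketch of the mechanism, with the macro
spelling assumed from asm/hyp_image.h:

	/* nVHE hyp objects reference __kvm_nvhe_<sym>; the kernel
	 * linker script equates that alias to the kernel symbol. */
	#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
	#define KVM_NVHE_ALIAS(sym)	kvm_nvhe_sym(sym) = sym;

	/* So the new entry expands, at link time, to:
	 *	__kvm_nvhe_alt_cb_patch_nops = alt_cb_patch_nops;
	 * giving hyp code access to the shared NOP callback, while the
	 * old arm64_const_caps_ready/cpu_hwcap_keys aliases go away
	 * along with the static keys they exported. */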
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index fe3bc4c1c5ac..ea659a382e24 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -586,7 +586,7 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
if (spectre_v4_mitigations_off())
return;
- if (cpus_have_final_cap(ARM64_SSBS))
+ if (cpus_have_cap(ARM64_SSBS))
return;
if (spectre_v4_mitigations_dynamic())