author      Will Deacon <will.deacon@arm.com>    2019-01-08 16:19:01 +0000
committer   Will Deacon <will.deacon@arm.com>    2019-01-10 17:49:35 +0000
commit      b89d82ef01b33bc50cbaa8ff05607879b40d0704 (patch)
tree        ae9df3844e5cb08338a8945d4a4fb9547ba27e26 /arch/arm64/kernel/cpufeature.c
parent      arm64: asm-prototypes: Fix fat-fingered typo in comment (diff)
arm64: kpti: Avoid rewriting early page tables when KASLR is enabled
A side effect of commit c55191e96caa ("arm64: mm: apply r/o permissions of
VM areas to its linear alias as well") is that the linear map is created
with page granularity, which means that transitioning the early page table
from global to non-global mappings when enabling kpti can take a
significant amount of time during boot.

Given that most CPU implementations do not require kpti, this mainly
impacts KASLR builds where kpti is forcefully enabled. However, in these
situations we know early on that non-global mappings are required and can
avoid the use of global mappings from the beginning. The only gotcha is
Cavium erratum #27456, which we must detect based on the MIDR value of the
boot CPU.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reported-by: John Garry <john.garry@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
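The hunks below are limited to cpufeature.c, so the early decision described
above (creating non-global mappings from the start of boot, except on parts
affected by Cavium erratum #27456) is not visible in this view. The following
is a minimal sketch of that decision logic; the helper names
early_use_ng_mappings() and boot_cpu_has_cavium_erratum_27456() are
illustrative assumptions, not necessarily the identifiers the patch introduces:

/*
 * Illustrative sketch only: decide, before CPU features are finalised,
 * whether the early page tables should be created with non-global (nG)
 * mappings, so that kpti_install_ng_mappings() has nothing to rewrite.
 */
static inline bool early_use_ng_mappings(void)
{
	/* Without kpti support there is no reason to drop global mappings. */
	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		return false;

	/*
	 * Parts affected by Cavium erratum #27456 must keep global kernel
	 * mappings; detect them from the boot CPU's MIDR value
	 * (hypothetical helper).
	 */
	if (boot_cpu_has_cavium_erratum_27456())
		return false;

	/*
	 * If KASLR is built in and an offset was actually applied, kpti
	 * will be enabled later anyway, so start out non-global.
	 */
	return IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0;
}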
Diffstat (limited to 'arch/arm64/kernel/cpufeature.c')
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..f6d84e2c92fe 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	/* Useful for KASLR robustness */
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return true;
+		return kaslr_offset() > 0;
 
 	/* Don't force KPTI for CPUs that are not vulnerable */
 	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
@@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	static bool kpti_applied = false;
 	int cpu = smp_processor_id();
 
-	if (kpti_applied)
+	/*
+	 * We don't need to rewrite the page-tables if either we've done
+	 * it already or we have KASLR enabled and therefore have not
+	 * created any global mappings at all.
+	 */
+	if (kpti_applied || kaslr_offset() > 0)
 		return;
 
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);