author     Mark Rutland <mark.rutland@arm.com>    2021-05-20 12:50:31 +0100
committer  Will Deacon <will@kernel.org>          2021-05-26 22:45:46 +0100
commit     3d8c1a013d78f32ee266097496cbd89b734b5fcb (patch)
tree       0d3c3057812148a28af6ba503a533e3acc7f9630 /arch/arm64/kernel/head.S
parent     arm64: smp: unify task and sp setup (diff)
arm64: smp: initialize cpu offset earlier
Now that we have a consistent place to initialize CPU context registers
early in the boot path, let's also initialize the per-cpu offset here.

This makes the primary and secondary boot paths more consistent, and
allows for the use of per-cpu operations earlier, which will be
necessary for instrumentation with KCSAN.

Note that smp_prepare_boot_cpu() still needs to re-initialize CPU0's
offset, as immediately prior to this the per-cpu areas may be
reallocated, and hence the boot-time offset may be stale. A comment is
added to make this clear.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210520115031.18509-7-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
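For illustration, the re-initialization described above corresponds
roughly to the following C sketch. It is not taken from this patch: it
assumes the generic per_cpu_offset() and smp_processor_id() helpers,
plus the set_this_cpu_offset() helper that also appears in the diff
below.

    /*
     * Rough sketch, not part of this diff: re-install CPU0's per-cpu
     * offset once the runtime per-cpu areas have been allocated.
     */
    void __init smp_prepare_boot_cpu(void)
    {
    	/*
    	 * The boot-time offset installed in head.S may be stale by
    	 * now: setup_per_cpu_areas() may have reallocated the per-cpu
    	 * areas, so reload CPU0's offset from __per_cpu_offset[].
    	 */
    	set_this_cpu_offset(per_cpu_offset(smp_processor_id()));
    }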
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--	arch/arm64/kernel/head.S	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e83b2899dce5..070ed53c049d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -402,17 +402,22 @@ SYM_FUNC_END(__create_page_tables)
* its location in the task stack. We reserve the entire pt_regs space
* for consistency with user tasks and kthreads.
*/
- .macro init_cpu_task tsk, tmp
+ .macro init_cpu_task tsk, tmp1, tmp2
msr sp_el0, \tsk
- ldr \tmp, [\tsk, #TSK_STACK]
- add sp, \tmp, #THREAD_SIZE
+ ldr \tmp1, [\tsk, #TSK_STACK]
+ add sp, \tmp1, #THREAD_SIZE
sub sp, sp, #PT_REGS_SIZE
stp xzr, xzr, [sp, #S_STACKFRAME]
add x29, sp, #S_STACKFRAME
- scs_load \tsk, \tmp
+ scs_load \tsk, \tmp1
+
+ adr_l \tmp1, __per_cpu_offset
+ ldr w\tmp2, [\tsk, #TSK_CPU]
+ ldr \tmp1, [\tmp1, \tmp2, lsl #3]
+ set_this_cpu_offset \tmp1
.endm
/*
@@ -422,7 +427,7 @@ SYM_FUNC_END(__create_page_tables)
*/
SYM_FUNC_START_LOCAL(__primary_switched)
adr_l x4, init_task
- init_cpu_task x4, x5
+ init_cpu_task x4, x5, x6
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
@@ -650,7 +655,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- init_cpu_task x2, x1
+ init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5
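In C terms, the lines added to init_cpu_task load the task's CPU
number from the TSK_CPU field and use it to index the 64-bit
__per_cpu_offset[] array (the lsl #3 scales the index by 8, the size
of each entry) before installing the result with set_this_cpu_offset.
A minimal, standalone model of that lookup follows; the array contents
and the task layout here are made-up stand-ins, since the real array
is populated by the per-cpu allocator:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-in values; the kernel fills __per_cpu_offset[]
     * from the per-cpu allocator. */
    static uint64_t __per_cpu_offset[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

    /* Models the TSK_CPU field read by "ldr w\tmp2, [\tsk, #TSK_CPU]". */
    struct task { uint32_t cpu; };

    /* Models "ldr \tmp1, [\tmp1, \tmp2, lsl #3]": index an array of
     * 8-byte entries by the CPU number. */
    static uint64_t this_cpu_offset(const struct task *tsk)
    {
    	return __per_cpu_offset[tsk->cpu];
    }

    int main(void)
    {
    	struct task t = { .cpu = 2 };
    	/* On arm64 the result is handed to set_this_cpu_offset, which
    	 * writes the per-cpu offset register; here we just print it. */
    	printf("offset = %#llx\n", (unsigned long long)this_cpu_offset(&t));
    	return 0;
    }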