author      Mark Rutland <mark.rutland@arm.com>    2021-06-07 10:46:11 +0100
committer   Will Deacon <will@kernel.org>          2021-06-07 11:35:55 +0100
commit      064dbfb4169141943ec7d9dbfd02974dd008f2ce (patch)
tree        2e67ec533160b8bc74d99ca82c0a11cd551be86b /arch/arm64/kernel/entry.S
parent      arm64: entry: add a call_on_irq_stack helper (diff)
arm64: entry: convert IRQ+FIQ handlers to C
For various reasons we'd like to convert the bulk of arm64's exception triage logic to C. As a step towards that, this patch converts the EL1 and EL0 IRQ+FIQ triage logic to C.

Separate C functions are added for the native and compat cases so that in subsequent patches we can handle native/compat differences in C.

Since the triage functions can now call arm64_apply_bp_hardening() directly, the do_el0_irq_bp_hardening() wrapper function is removed.

Since the user_exit_irqoff macro is now unused, it is removed. The user_enter_irqoff macro is still used by the ret_to_user code, and cannot be removed at this time.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
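As context for reading the diff below: the removed asm macros (irq_handler, el1_interrupt_handler, el0_interrupt_handler) are replaced by C entry points whose names appear in the new bl instructions (el1_irq_handler, el1_fiq_handler, el0_irq_handler, el0_fiq_handler, and the compat variants). The following is a minimal C sketch of the shape of those entry points, mirroring the removed assembly rather than quoting the actual C-side changes; only the names visible in this diff and in the commit message are taken from the patch, while the bodies, annotations, and includes (omitted here) are assumptions. A second sketch covering the removed stack-switching macros follows the diff.

/*
 * Sketch only -- approximates the removed asm triage in C.
 * The real patch adds its handlers outside this file.
 */
asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
{
	/* Mirrors "el1_interrupt_handler handle_arch_irq". */
	enter_el1_irq_or_nmi(regs);
	handle_arch_irq(regs);		/* handle_arch_irq is a function pointer */
	/* CONFIG_PREEMPTION check elided; see the removed asm block. */
	exit_el1_irq_or_nmi(regs);
}

asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
{
	/* Mirrors "el0_interrupt_handler handle_arch_irq". */
	enter_from_user_mode();
	/* The removed "tbz x22, #55" test; x22 is assumed to hold the ELR. */
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();
	handle_arch_irq(regs);
}

The FIQ and compat handlers follow the same pattern, dispatching via handle_arch_fiq or the compat entry points instead.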
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--  arch/arm64/kernel/entry.S  108
1 file changed, 14 insertions(+), 94 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8ca74ce115ee..8eb3a0a51413 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
- .macro user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
- bl enter_from_user_mode
-#endif
- .endm
-
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
@@ -486,63 +480,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov x24, scs_sp // preserve the original shadow stack
-#endif
-
- /*
- * Compare sp with the base of the task stack.
- * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
- * and should switch to the irq stack.
- */
- ldr x25, [tsk, TSK_STACK]
- eor x25, x25, x19
- and x25, x25, #~(THREAD_SIZE - 1)
- cbnz x25, 9998f
-
- ldr_this_cpu x25, irq_stack_ptr, x26
- mov x26, #IRQ_STACK_SIZE
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
- /* also switch to the irq shadow stack */
- ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
- .endm
-
- /*
- * The callee-saved regs (x19-x29) should be preserved between
- * irq_stack_entry and irq_stack_exit, but note that kernel_entry
- * uses x20-x23 to store data for later use.
- */
- .macro irq_stack_exit
- mov sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov scs_sp, x24
-#endif
- .endm
-
/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
- .macro irq_handler, handler:req
- ldr_l x1, \handler
- mov x0, sp
- irq_stack_entry
- blr x1
- irq_stack_exit
- .endm
-
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,35 +495,6 @@ tsk .req x28 // current thread_info
#endif
.endm
- .macro el1_interrupt_handler, handler:req
- enable_da
-
- mov x0, sp
- bl enter_el1_irq_or_nmi
-
- irq_handler \handler
-
-#ifdef CONFIG_PREEMPTION
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
- cbnz x24, 1f // preempt count != 0
- bl arm64_preempt_schedule_irq // irq en/disable is done inside
-1:
-#endif
-
- mov x0, sp
- bl exit_el1_irq_or_nmi
- .endm
-
- .macro el0_interrupt_handler, handler:req
- user_exit_irqoff
- enable_da
-
- tbz x22, #55, 1f
- bl do_el0_irq_bp_hardening
-1:
- irq_handler \handler
- .endm
-
.text
/*
@@ -704,13 +618,15 @@ SYM_CODE_END(el1_sync)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
- el1_interrupt_handler handle_arch_irq
+ mov x0, sp
+ bl el1_irq_handler
kernel_exit 1
SYM_CODE_END(el1_irq)
SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
kernel_entry 1
- el1_interrupt_handler handle_arch_fiq
+ mov x0, sp
+ bl el1_fiq_handler
kernel_exit 1
SYM_CODE_END(el1_fiq)
@@ -737,12 +653,16 @@ SYM_CODE_END(el0_sync_compat)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
- b el0_irq_naked
+ mov x0, sp
+ bl el0_irq_compat_handler
+ b ret_to_user
SYM_CODE_END(el0_irq_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
kernel_entry 0, 32
- b el0_fiq_naked
+ mov x0, sp
+ bl el0_fiq_compat_handler
+ b ret_to_user
SYM_CODE_END(el0_fiq_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
@@ -756,15 +676,15 @@ SYM_CODE_END(el0_error_compat)
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
-el0_irq_naked:
- el0_interrupt_handler handle_arch_irq
+ mov x0, sp
+ bl el0_irq_handler
b ret_to_user
SYM_CODE_END(el0_irq)
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
kernel_entry 0
-el0_fiq_naked:
- el0_interrupt_handler handle_arch_fiq
+ mov x0, sp
+ bl el0_fiq_handler
b ret_to_user
SYM_CODE_END(el0_fiq)
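The irq_stack_entry/irq_stack_exit macros removed above (save sp and scs_sp, compare sp against the task stack base with a ~(THREAD_SIZE - 1) mask, and switch to this CPU's irq_stack_ptr and shadow call stack) are not re-implemented in this file; per the parent patch ("arm64: entry: add a call_on_irq_stack helper"), the equivalent switch is expected to happen on the C side around the root handler. A hedged sketch of that shape, where the exact call_on_irq_stack() signature and the on_thread_stack() test are assumptions rather than contents of this diff:

/*
 * Sketch only: how a C triage path might route the root handler onto the
 * per-CPU IRQ stack, replacing the removed irq_stack_entry/irq_stack_exit.
 */
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	if (on_thread_stack())
		/* On the task stack: run the handler on the IRQ (and shadow) stack. */
		call_on_irq_stack(regs, handler);
	else
		/* Already off the task stack (e.g. nested IRQ): call directly. */
		handler(regs);
}

This mirrors the removed assembly's check: if sp is already outside the task stack, the handler runs on the current stack; otherwise the per-CPU IRQ stack is used.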