Diffstat (limited to 'arch/arm64/kernel/entry-common.c')
-rw-r--r--  arch/arm64/kernel/entry-common.c  105
1 file changed, 71 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index ef7fcefb96bd..27369fa1c032 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -6,6 +6,7 @@
*/
#include <linux/context_tracking.h>
+#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
@@ -40,7 +41,7 @@ static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
lockdep_hardirqs_off(CALLER_ADDR0);
- rcu_irq_enter();
+ ct_irq_enter();
trace_hardirqs_off_finish();
regs->exit_rcu = true;
@@ -56,6 +57,7 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
__enter_from_kernel_mode(regs);
mte_check_tfsr_entry();
+ mte_disable_tco_entry(current);
}
/*
@@ -73,8 +75,8 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- rcu_irq_exit();
+ lockdep_hardirqs_on_prepare();
+ ct_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
return;
}
@@ -82,7 +84,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
trace_hardirqs_on();
} else {
if (regs->exit_rcu)
- rcu_irq_exit();
+ ct_irq_exit();
}
}
@@ -103,6 +105,7 @@ static __always_inline void __enter_from_user_mode(void)
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
trace_hardirqs_off_finish();
+ mte_disable_tco_entry(current);
}
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
@@ -118,7 +121,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
static __always_inline void __exit_to_user_mode(void)
{
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
user_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);
}
@@ -158,7 +161,7 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
__nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0);
lockdep_hardirq_enter();
- rcu_nmi_enter();
+ ct_nmi_enter();
trace_hardirqs_off_finish();
ftrace_nmi_enter();
@@ -176,10 +179,10 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
ftrace_nmi_exit();
if (restore) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
}
- rcu_nmi_exit();
+ ct_nmi_exit();
lockdep_hardirq_exit();
if (restore)
lockdep_hardirqs_on(CALLER_ADDR0);
@@ -196,7 +199,7 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
lockdep_hardirqs_off(CALLER_ADDR0);
- rcu_nmi_enter();
+ ct_nmi_enter();
trace_hardirqs_off_finish();
}
@@ -212,17 +215,34 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
if (restore) {
trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
}
- rcu_nmi_exit();
+ ct_nmi_exit();
if (restore)
lockdep_hardirqs_on(CALLER_ADDR0);
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+ (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
static void __sched arm64_preempt_schedule_irq(void)
{
- lockdep_assert_irqs_disabled();
+ if (!need_irq_preemption())
+ return;
+
+ /*
+ * Note: thread_info::preempt_count includes both thread_info::count
+ * and thread_info::need_resched, and is not equivalent to
+ * preempt_count().
+ */
+ if (READ_ONCE(current_thread_info()->preempt_count) != 0)
+ return;
/*
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
@@ -262,13 +282,13 @@ extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
- unsigned int esr)
+ unsigned long esr)
{
arm64_enter_nmi(regs);
console_verbose();
- pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
vector, smp_processor_id(), esr,
esr_get_class_string(esr));
@@ -309,7 +329,8 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
-static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+static __always_inline bool
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
return false;
@@ -359,11 +380,20 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_undef(struct pt_regs *regs)
+static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_el1_bti(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@@ -382,7 +412,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
- do_ptrauth_fault(regs, esr);
+ do_el1_fpac(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@@ -405,7 +435,10 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
break;
case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_UNKNOWN:
- el1_undef(regs);
+ el1_undef(regs, esr);
+ break;
+ case ESR_ELx_EC_BTI:
+ el1_bti(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_CUR:
case ESR_ELx_EC_SOFTSTP_CUR:
@@ -438,14 +471,7 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
do_interrupt_handler(regs, handler);
irq_exit_rcu();
- /*
- * Note: thread_info::preempt_count includes both thread_info::count
- * and thread_info::need_resched, and is not equivalent to
- * preempt_count().
- */
- if (IS_ENABLED(CONFIG_PREEMPTION) &&
- READ_ONCE(current_thread_info()->preempt_count) == 0)
- arm64_preempt_schedule_irq();
+ arm64_preempt_schedule_irq();
exit_to_kernel_mode(regs);
}
@@ -524,6 +550,14 @@ static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
+static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ do_sme_acc(esr, regs);
+ exit_to_user_mode(regs);
+}
+
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
@@ -561,11 +595,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
-static void noinstr el0_undef(struct pt_regs *regs)
+static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
exit_to_user_mode(regs);
}
@@ -573,7 +607,7 @@ static void noinstr el0_bti(struct pt_regs *regs)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_bti(regs);
+ do_el0_bti(regs);
exit_to_user_mode(regs);
}
@@ -608,7 +642,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_ptrauth_fault(regs, esr);
+ do_el0_fpac(regs, esr);
exit_to_user_mode(regs);
}
@@ -632,6 +666,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_SVE:
el0_sve_acc(regs, esr);
break;
+ case ESR_ELx_EC_SME:
+ el0_sme_acc(regs, esr);
+ break;
case ESR_ELx_EC_FP_EXC64:
el0_fpsimd_exc(regs, esr);
break;
@@ -646,7 +683,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
el0_pc(regs, esr);
break;
case ESR_ELx_EC_UNKNOWN:
- el0_undef(regs);
+ el0_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el0_bti(regs);
@@ -764,7 +801,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_CP14_64:
- el0_undef(regs);
+ el0_undef(regs, esr);
break;
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:
@@ -805,7 +842,7 @@ UNHANDLED(el0t, 32, error)
#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
- unsigned int esr = read_sysreg(esr_el1);
+ unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs);
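
Note on the PREEMPT_DYNAMIC hunk above: the new need_irq_preemption() gate lets arm64_preempt_schedule_irq() bail out early when dynamic preemption has been switched off, and otherwise falls back to the compile-time CONFIG_PREEMPTION check, before consulting the thread_info::preempt_count snapshot. The standalone C sketch below is only a toy model of that control flow: an ordinary bool stands in for the kernel's static branch and a plain int for the preempt_count snapshot; the toy_* names are illustrative and are not kernel APIs.

/*
 * Toy model of the IRQ-exit preemption gate added in the diff above.
 * A plain bool stands in for sk_dynamic_irqentry_exit_cond_resched and
 * an int for the thread_info::preempt_count snapshot; nothing here is
 * real kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static bool toy_dynamic_preempt_enabled = true; /* ~ the static branch */
static int  toy_preempt_count = 0;              /* ~ thread_info::preempt_count */

static bool toy_need_irq_preemption(void)
{
	/* With PREEMPT_DYNAMIC this is a patched static branch; here, a bool. */
	return toy_dynamic_preempt_enabled;
}

static void toy_preempt_schedule_irq(void)
{
	if (!toy_need_irq_preemption())
		return;         /* preemption model switched off at "boot" */

	if (toy_preempt_count != 0)
		return;         /* preemption disabled or no reschedule pending */

	printf("would call preempt_schedule_irq() here\n");
}

int main(void)
{
	toy_preempt_schedule_irq();      /* prints the message */

	toy_dynamic_preempt_enabled = false;
	toy_preempt_schedule_irq();      /* returns silently, as on a non-preemptible boot */
	return 0;
}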