Diffstat (limited to 'arch/powerpc/kernel/interrupt.c')
-rw-r--r--  arch/powerpc/kernel/interrupt.c  223
1 file changed, 32 insertions(+), 191 deletions(-)
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 784ea3289c84..e0c681d0b076 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -3,6 +3,7 @@
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
+#include <linux/rseq.h>
#include <linux/sched/debug.h> /* for show_regs */
#include <asm/kup.h>
@@ -24,7 +25,9 @@
unsigned long global_dbcr0[NR_CPUS];
#endif
-typedef long (*syscall_fn)(long, long, long, long, long, long);
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
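
A note on how the new key is presumably consumed: the need_irq_preemption() helper tested later in this diff is not shown here, but with PREEMPT_DYNAMIC it would gate interrupt-exit preemption on the static key so the policy can be flipped at runtime, falling back to the compile-time choice otherwise. An assumed shape, not part of this patch:

static inline bool need_irq_preemption(void)
{
	/* Assumed: PREEMPT_DYNAMIC routes the decision through the key above. */
	if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC))
		return static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched);

	/* Otherwise the answer is fixed at build time. */
	return IS_ENABLED(CONFIG_PREEMPTION);
}
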
@@ -52,16 +55,18 @@ static inline bool exit_must_hard_disable(void)
*/
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
+ bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
/* This must be done with RI=1 because tracing may touch vmaps */
trace_hardirqs_on();
- if (exit_must_hard_disable() || !restartable)
+ if (must_hard_disable)
__hard_EE_RI_disable();
#ifdef CONFIG_PPC64
/* This pattern matches prep_irq_for_idle */
if (unlikely(lazy_irq_pending_nocheck())) {
- if (exit_must_hard_disable() || !restartable) {
+ if (must_hard_disable) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
__hard_RI_enable();
}
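
The hunk above hoists the repeated exit_must_hard_disable() || !restartable test into a local so the pre-tracing disable and the pending-interrupt fallback cannot take different branches. Condensed from the lines shown, the resulting function reads roughly as follows (the middle of the pending-interrupt path is elided, matching the hunk boundaries):

static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	bool must_hard_disable = (exit_must_hard_disable() || !restartable);

	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (must_hard_disable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (must_hard_disable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		/* ... undo and report not-restartable (lines not shown in this diff) ... */
		return false;
	}
#endif
	return true;
}
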
@@ -73,165 +78,6 @@ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
return true;
}
-/* Has to run notrace because it is entered not completely "reconciled" */
-notrace long system_call_exception(long r3, long r4, long r5,
- long r6, long r7, long r8,
- unsigned long r0, struct pt_regs *regs)
-{
- syscall_fn f;
-
- kuap_lock();
-
- regs->orig_gpr3 = r3;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
-
- trace_hardirqs_off(); /* finish reconciling */
-
- CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
- user_exit_irqoff();
-
- BUG_ON(regs_is_unrecoverable(regs));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(arch_irq_disabled_regs(regs));
-
-#ifdef CONFIG_PPC_PKEY
- if (mmu_has_feature(MMU_FTR_PKEY)) {
- unsigned long amr, iamr;
- bool flush_needed = false;
- /*
- * When entering from userspace we mostly have the AMR/IAMR
- * different from kernel default values. Hence don't compare.
- */
- amr = mfspr(SPRN_AMR);
- iamr = mfspr(SPRN_IAMR);
- regs->amr = amr;
- regs->iamr = iamr;
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
- mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
- flush_needed = true;
- }
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
- mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
- flush_needed = true;
- }
- if (flush_needed)
- isync();
- } else
-#endif
- kuap_assert_locked();
-
- booke_restore_dbcr0();
-
- account_cpu_user_entry();
-
- account_stolen_time();
-
- /*
- * This is not required for the syscall exit path, but makes the
- * stack frame look nicer. If this was initialised in the first stack
- * frame, or if the unwinder was taught the first stack frame always
- * returns to user with IRQS_ENABLED, this store could be avoided!
- */
- irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
-
- /*
- * If system call is called with TM active, set _TIF_RESTOREALL to
- * prevent RFSCV being used to return to userspace, because POWER9
- * TM implementation has problems with this instruction returning to
- * transactional state. Final register values are not relevant because
- * the transaction will be aborted upon return anyway. Or in the case
- * of unsupported_scv SIGILL fault, the return state does not much
- * matter because it's an edge case.
- */
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
- set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
-
- /*
- * If the system call was made with a transaction active, doom it and
- * return without performing the system call. Unless it was an
- * unsupported scv vector, in which case it's treated like an illegal
- * instruction.
- */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
- !trap_is_unsupported_scv(regs)) {
- /* Enable TM in the kernel, and disable EE (for scv) */
- hard_irq_disable();
- mtmsr(mfmsr() | MSR_TM);
-
- /* tabort, this dooms the transaction, nothing else */
- asm volatile(".long 0x7c00071d | ((%0) << 16)"
- :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
-
- /*
- * Userspace will never see the return value. Execution will
- * resume after the tbegin. of the aborted transaction with the
- * checkpointed register state. A context switch could occur
- * or signal delivered to the process before resuming the
- * doomed transaction context, but that should all be handled
- * as expected.
- */
- return -ENOSYS;
- }
-#endif // CONFIG_PPC_TRANSACTIONAL_MEM
-
- local_irq_enable();
-
- if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- /*
- * We use the return value of do_syscall_trace_enter() as the
- * syscall number. If the syscall was rejected for any reason
- * do_syscall_trace_enter() returns an invalid syscall number
- * and the test against NR_syscalls will fail and the return
- * value to be used is in regs->gpr[3].
- */
- r0 = do_syscall_trace_enter(regs);
- if (unlikely(r0 >= NR_syscalls))
- return regs->gpr[3];
- r3 = regs->gpr[3];
- r4 = regs->gpr[4];
- r5 = regs->gpr[5];
- r6 = regs->gpr[6];
- r7 = regs->gpr[7];
- r8 = regs->gpr[8];
-
- } else if (unlikely(r0 >= NR_syscalls)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- return -ENOSYS;
- }
-
- /* May be faster to do array_index_nospec? */
- barrier_nospec();
-
- if (unlikely(is_compat_task())) {
- f = (void *)compat_sys_call_table[r0];
-
- r3 &= 0x00000000ffffffffULL;
- r4 &= 0x00000000ffffffffULL;
- r5 &= 0x00000000ffffffffULL;
- r6 &= 0x00000000ffffffffULL;
- r7 &= 0x00000000ffffffffULL;
- r8 &= 0x00000000ffffffffULL;
-
- } else {
- f = (void *)sys_call_table[r0];
- }
-
- return f(r3, r4, r5, r6, r7, r8);
-}
-
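
One detail worth noting in the removed dispatch code: the compat path masks each argument register down to its low 32 bits, because a 32-bit task can legitimately leave stale data in the upper halves of the 64-bit GPRs. A self-contained illustration of that zero-extension (example values are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the removed "rN &= 0x00000000ffffffffULL" masking: keep only
 * the low 32 bits that a compat (32-bit) task actually passed. */
static uint64_t compat_arg(uint64_t reg)
{
	return reg & 0x00000000ffffffffULL;
}

int main(void)
{
	uint64_t r3 = 0xdeadbeef00000004ULL;	/* stale high bits + real arg 4 */

	printf("raw %#llx -> compat %#llx\n",
	       (unsigned long long)r3,
	       (unsigned long long)compat_arg(r3));	/* prints "... -> 0x4" */
	return 0;
}
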
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -254,7 +100,7 @@ static notrace void booke_load_dbcr0(void)
#endif
}
-static void check_return_regs_valid(struct pt_regs *regs)
+static notrace void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
unsigned long trap, srr0, srr1;
@@ -284,7 +130,7 @@ static void check_return_regs_valid(struct pt_regs *regs)
case 0x1600:
case 0x1800:
validp = &local_paca->hsrr_valid;
- if (!*validp)
+ if (!READ_ONCE(*validp))
return;
srr0 = mfspr(SPRN_HSRR0);
@@ -294,7 +140,7 @@ static void check_return_regs_valid(struct pt_regs *regs)
break;
default:
validp = &local_paca->srr_valid;
- if (!*validp)
+ if (!READ_ONCE(*validp))
return;
srr0 = mfspr(SPRN_SRR0);
@@ -320,19 +166,17 @@ static void check_return_regs_valid(struct pt_regs *regs)
* such things will get caught most of the time, statistically
* enough to be able to get a warning out.
*/
- barrier();
-
- if (!*validp)
+ if (!READ_ONCE(*validp))
return;
- if (!warned) {
- warned = true;
+ if (!data_race(warned)) {
+ data_race(warned = true);
printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
show_regs(regs);
}
- *validp = 0; /* fixup */
+ WRITE_ONCE(*validp, 0); /* fixup */
#endif
}
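
The conversion above replaces a compiler barrier() and plain memory accesses with READ_ONCE()/WRITE_ONCE(), and wraps the warned flag in data_race(): the re-check of *validp is deliberately racy (the SRRs can be clobbered in between), and the annotations document that intent while keeping KCSAN quiet. A minimal sketch of the same warn-once-despite-races pattern, with illustrative names rather than this file's code:

static bool warned;

/* Warn at most roughly once. Concurrent callers may race on the flag,
 * which is acceptable here, so the accesses are data_race()-annotated. */
static void warn_once_racy(const char *what)
{
	if (!data_race(warned)) {
		data_race(warned = true);
		printk("%s\n", what);
	}
}
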
@@ -345,7 +189,7 @@ again:
ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
- if (ti_flags & _TIF_NEED_RESCHED) {
+ if (ti_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {
schedule();
} else {
/*
@@ -426,7 +270,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
unsigned long ret = 0;
bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
kuap_assert_locked();
@@ -504,7 +348,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
BUG_ON(regs_is_unrecoverable(regs));
BUG_ON(arch_irq_disabled_regs(regs));
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
@@ -527,7 +371,6 @@ void preempt_schedule_irq(void);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
- unsigned long flags;
unsigned long ret = 0;
unsigned long kuap;
bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
@@ -535,21 +378,29 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
if (regs_is_unrecoverable(regs))
unrecoverable_exception(regs);
/*
- * CT_WARN_ON comes here via program_check_exception,
- * so avoid recursion.
+ * CT_WARN_ON comes here via program_check_exception, so avoid
+ * recursion.
+ *
+ * Skip the assertion on PMIs on 64e to work around a problem caused
+ * by NMI PMIs incorrectly taking this interrupt return path; it's
+ * possible for this to hit after interrupt exit to user switches
+ * context to user. See also the comment in the performance monitor
+ * handler in exceptions-64e.S
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM)
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
+ TRAP(regs) != INTERRUPT_PROGRAM &&
+ TRAP(regs) != INTERRUPT_PERFMON)
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
kuap = kuap_get_and_assert_locked();
- local_irq_save(flags);
+ local_irq_disable();
if (!arch_irq_disabled_regs(regs)) {
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
- if (IS_ENABLED(CONFIG_PREEMPT)) {
+ if (need_irq_preemption()) {
/* Return to preemptible kernel context */
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
@@ -592,16 +443,6 @@ again:
if (unlikely(stack_store))
__hard_EE_RI_disable();
- /*
- * Returning to a kernel context with local irqs disabled.
- * Here, if EE was enabled in the interrupted context, enable
- * it on return as well. A problem exists here where a soft
- * masked interrupt may have cleared MSR[EE] and set HARD_DIS
- * here, and it will still exist on return to the caller. This
- * will be resolved by the masked interrupt firing again.
- */
- if (regs->msr & MSR_EE)
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
#endif /* CONFIG_PPC64 */
}
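
The block removed at the end dealt with the 64-bit soft-masking scheme: if the interrupted context had MSR[EE] set, the exit path cleared PACA_IRQ_HARD_DIS so EE would be re-enabled on return. For context, a simplified model of the bookkeeping the deleted comment describes (field and helper names are illustrative, not the kernel's):

/* Simplified model of 64-bit lazy (soft) interrupt masking. */
struct lazy_irq_model {
	bool soft_disabled;		/* kernel asked for irqs off */
	unsigned int irq_happened;	/* pending PACA_IRQ_* style bits */
};

#define MODEL_IRQ_HARD_DIS	0x1	/* stands in for PACA_IRQ_HARD_DIS */

/* A masked interrupt arrives while soft-disabled: record it, hard-disable,
 * and return; it is replayed when the kernel soft-enables again. */
static void model_masked_irq(struct lazy_irq_model *s, unsigned int reason)
{
	s->irq_happened |= reason | MODEL_IRQ_HARD_DIS;
	/* MSR[EE] is cleared here in the real implementation */
}
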