Diffstat (limited to 'arch/powerpc/include/asm/interrupt.h')
 arch/powerpc/include/asm/interrupt.h | 86 ++++++++++++++++----------------
 1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index b14f54d789d2..23638d4e73ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -69,10 +69,25 @@
#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
+#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+/*
+ * WARN/BUG is handled with a program interrupt so minimise checks here to
+ * avoid recursion and maximise the chance of getting the first oops handled.
+ */
+#define INT_SOFT_MASK_BUG_ON(regs, cond) \
+do { \
+ if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
+ BUG_ON(cond); \
+} while (0)
+#else
+#define INT_SOFT_MASK_BUG_ON(regs, cond)
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
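The TRAP(regs) test in INT_SOFT_MASK_BUG_ON() is what the comment above is guarding against: on powerpc, BUG_ON() compiles down to a trap instruction, and a trap raises a program interrupt. If a check inside the program-check entry path fired, it would take another program interrupt before the first oops ever got reported. A minimal user-space analogy of the guard (an illustrative sketch; the struct and function names are invented for the example, not kernel code):

#include <stdbool.h>
#include <stdlib.h>

#define INTERRUPT_PROGRAM	0x700	/* powerpc program-check vector */

struct fake_regs {
	unsigned long trap;	/* which interrupt brought us here */
	bool user;		/* taken from user mode? */
};

/* Mirror of the macro: only assert when we cannot already be inside
 * the assertion machinery itself. */
static void soft_mask_bug_on(struct fake_regs *regs, bool cond)
{
	if (regs->user || regs->trap != INTERRUPT_PROGRAM) {
		if (cond)
			abort();	/* stands in for BUG_ON() */
	}
}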
@@ -82,7 +97,7 @@ DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
return false;
if (regs->nip >= (unsigned long)__end_soft_masked)
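The switch from an open-coded MSR_PR test to user_mode() is behavior-preserving: on powerpc, user_mode() in asm/ptrace.h tests the very same bit, roughly (quoted from memory, so treat the exact spelling as an assumption):

#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)

The helper states the intent, "was this interrupt taken from user mode?", instead of naming the bit.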
@@ -137,28 +152,8 @@ static inline void booke_restore_dbcr0(void)
static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC32
- if (!arch_irq_disabled_regs(regs))
- trace_hardirqs_off();
-
- if (user_mode(regs))
- kuap_lock();
- else
- kuap_save_and_lock(regs);
-
- if (user_mode(regs))
- account_cpu_user_entry();
-#endif
-
#ifdef CONFIG_PPC64
- bool trace_enable = false;
-
- if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
- if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
- trace_enable = true;
- } else {
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- }
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
/*
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
@@ -169,20 +164,20 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
* context.
*/
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!(regs->msr & MSR_EE));
+ INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
__hard_irq_enable();
} else {
__hard_RI_enable();
}
+ /* Enable MSR[RI] early, to support kernel SLB and hash faults */
+#endif
- /* Do this when RI=1 because it can cause SLB faults */
- if (trace_enable)
+ if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();
if (user_mode(regs)) {
kuap_lock();
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ CT_WARN_ON(ct_state() != CT_STATE_USER);
user_exit_irqoff();
account_cpu_user_entry();
@@ -193,20 +188,15 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM) {
- CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(is_implicit_soft_masked(regs));
- }
-
- /* Move this under a debugging check */
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
- arch_irq_disabled_regs(regs))
- BUG_ON(search_kernel_restart_table(regs->nip));
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
+ ct_state() != CT_STATE_IDLE);
+ INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
+ INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
+ search_kernel_restart_table(regs->nip));
}
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
-#endif
+ INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
+ !(regs->msr & MSR_EE));
booke_restore_dbcr0();
}
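Pieced together from the three hunks above, the post-patch interrupt_enter_prepare() runs one straight-line sequence for both 32-bit and 64-bit. This is a readability reconstruction assembled from the diff itself, with context lines the diff does not show elided as /* ... */ comments:

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	irq_soft_mask_set(IRQS_ALL_DISABLED);

	/* If taken with HARD_DIS clear, enable MSR[EE] (full comment above) */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}
	/* Enable MSR[RI] early, to support kernel SLB and hash faults */
#endif

	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuap_lock();
		CT_WARN_ON(ct_state() != CT_STATE_USER);
		user_exit_irqoff();
		account_cpu_user_entry();
	} else {
		/* ... kernel-entry context lines not shown in the diff ... */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
			CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
				   ct_state() != CT_STATE_IDLE);
		INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
		INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
				     search_kernel_restart_table(regs->nip));
	}
	INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
			     !(regs->msr & MSR_EE));

	booke_restore_dbcr0();
}

The ordering matters: the soft mask and MSR[RI] are set up before trace_hardirqs_off(), because the tracing code can itself take SLB or hash faults, which need RI=1 to be recoverable.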
@@ -280,7 +270,7 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs)
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}
- if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}
@@ -346,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
if (IS_ENABLED(CONFIG_KASAN))
return;
+ /*
+ * Likewise, do not use it in real mode if the percpu first chunk is
+ * not embedded: with CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled,
+ * the percpu allocation can come from the vmalloc area.
+ */
+ if (percpu_first_chunk_is_paged)
+ return;
+
/* Otherwise, it should be safe to call it */
nmi_enter();
}
@@ -361,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
// no nmi_exit for a pseries hash guest taking a real mode exception
} else if (IS_ENABLED(CONFIG_KASAN)) {
// no nmi_exit for KASAN in real mode
+ } else if (percpu_first_chunk_is_paged) {
+ // no nmi_exit if percpu first chunk is not embedded
} else {
nmi_exit();
}
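Both hunks key off the same predicate: in real mode the MMU is off, so percpu accesses must not resolve to vmalloc addresses, and with CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK the percpu first chunk can live exactly there. percpu_first_chunk_is_paged is expected to be an asm/percpu.h helper along these lines (a sketch of its assumed shape, not a verified quote from the tree):

#ifdef CONFIG_PPC64
DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
#define percpu_first_chunk_is_paged \
	(static_key_enabled(&__percpu_first_chunk_is_paged.key))
#else
#define percpu_first_chunk_is_paged	false
#endif

so the check compiles away entirely on configurations where the first chunk is always embedded.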
@@ -593,6 +593,7 @@ ____##func(struct pt_regs *regs)
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
@@ -664,8 +665,7 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
local_irq_enable();
}
-long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
- unsigned long r0, struct pt_regs *regs);
+long system_call_exception(struct pt_regs *regs, unsigned long r0);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
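The system_call_exception() prototype change drops the six re-passed argument registers: the saved pt_regs frame already holds GPR3..GPR8, the powerpc syscall argument registers, and r0 carries the syscall number, so the handler can pull the arguments out of regs itself. A self-contained user-space sketch of that convention (all names invented for illustration; compiles with any C99 compiler):

#include <stdio.h>

struct pt_regs_sketch { unsigned long gpr[32]; };

typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
			   unsigned long, unsigned long, unsigned long);

static long sys_demo(unsigned long a, unsigned long b, unsigned long c,
		     unsigned long d, unsigned long e, unsigned long f)
{
	return (long)(a + b + c + d + e + f);
}

/* Mirrors: long system_call_exception(struct pt_regs *regs, unsigned long r0) */
static long system_call_sketch(struct pt_regs_sketch *regs, unsigned long r0)
{
	syscall_fn f = sys_demo;	/* the kernel would index a table by r0 */
	(void)r0;
	return f(regs->gpr[3], regs->gpr[4], regs->gpr[5],
		 regs->gpr[6], regs->gpr[7], regs->gpr[8]);
}

int main(void)
{
	struct pt_regs_sketch regs = { .gpr = { [3] = 1, [4] = 2, [5] = 3,
						[6] = 4, [7] = 5, [8] = 6 } };
	printf("%ld\n", system_call_sketch(&regs, 0));	/* prints 21 */
	return 0;
}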