Diffstat
 -rw-r--r--   arch/powerpc/include/asm/interrupt.h | 192
 1 file changed, 138 insertions, 54 deletions
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index a1d238255f07..4745bb9998bd 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -69,10 +69,24 @@
 #include <linux/context_tracking.h>
 #include <linux/hardirq.h>
 #include <asm/cputime.h>
+#include <asm/firmware.h>
 #include <asm/ftrace.h>
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
+#ifdef CONFIG_PPC64
+/*
+ * WARN/BUG is handled with a program interrupt so minimise checks here to
+ * avoid recursion and maximise the chance of getting the first oops handled.
+ */
+#define INT_SOFT_MASK_BUG_ON(regs, cond)				\
+do {									\
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&		\
+	    (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM)))	\
+		BUG_ON(cond);						\
+} while (0)
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 extern char __end_soft_masked[];
 bool search_kernel_soft_mask_table(unsigned long addr);
@@ -97,6 +111,11 @@ static inline void srr_regs_clobbered(void)
 	local_paca->hsrr_valid = 0;
 }
 #else
+static inline unsigned long search_kernel_restart_table(unsigned long addr)
+{
+	return 0;
+}
+
 static inline bool is_implicit_soft_masked(struct pt_regs *regs)
 {
 	return false;
@@ -118,9 +137,6 @@ static inline void nap_adjust_return(struct pt_regs *regs)
 #endif
 }
 
-struct interrupt_state {
-};
-
 static inline void booke_restore_dbcr0(void)
 {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -133,48 +149,72 @@ static inline void booke_restore_dbcr0(void)
 #endif
 }
 
-static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+static inline void interrupt_enter_prepare(struct pt_regs *regs)
 {
 #ifdef CONFIG_PPC32
 	if (!arch_irq_disabled_regs(regs))
 		trace_hardirqs_off();
 
-	if (user_mode(regs)) {
-		kuep_lock();
-		account_cpu_user_entry();
-	} else {
+	if (user_mode(regs))
+		kuap_lock();
+	else
 		kuap_save_and_lock(regs);
-	}
+
+	if (user_mode(regs))
+		account_cpu_user_entry();
 #endif
 
 #ifdef CONFIG_PPC64
-	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+	bool trace_enable = false;
+
+	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
+		if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+			trace_enable = true;
+	} else {
+		irq_soft_mask_set(IRQS_ALL_DISABLED);
+	}
+
+	/*
+	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
+	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
+	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
+	 * soft-masked. The interrupt handler may later call
+	 * interrupt_cond_local_irq_enable() to achieve a regular process
+	 * context.
+	 */
+	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
+		INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
+		__hard_irq_enable();
+	} else {
+		__hard_RI_enable();
+	}
+
+	/* Do this when RI=1 because it can cause SLB faults */
+	if (trace_enable)
 		trace_hardirqs_off();
-	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
 	if (user_mode(regs)) {
+		kuap_lock();
 		CT_WARN_ON(ct_state() != CONTEXT_USER);
 		user_exit_irqoff();
 
 		account_cpu_user_entry();
 		account_stolen_time();
 	} else {
+		kuap_save_and_lock(regs);
 		/*
 		 * CT_WARN_ON comes here via program_check_exception,
 		 * so avoid recursion.
 		 */
-		if (TRAP(regs) != INTERRUPT_PROGRAM) {
-			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
-			BUG_ON(is_implicit_soft_masked(regs));
-		}
-#ifdef CONFIG_PPC_BOOK3S
-		/* Move this under a debugging check */
-		if (arch_irq_disabled_regs(regs))
-			BUG_ON(search_kernel_restart_table(regs->nip));
-#endif
+		if (TRAP(regs) != INTERRUPT_PROGRAM)
+			CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
+				   ct_state() != CONTEXT_IDLE);
+		INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
+		INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
					   search_kernel_restart_table(regs->nip));
 	}
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
+	INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
				   !(regs->msr & MSR_EE));
 #endif
 
 	booke_restore_dbcr0();
@@ -194,23 +234,30 @@
  * However interrupt_nmi_exit_prepare does return directly to regs, because
  * NMIs do not do "exit work" or replay soft-masked interrupts.
  */
-static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+static inline void interrupt_exit_prepare(struct pt_regs *regs)
 {
 }
 
-static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
 {
+#ifdef CONFIG_PPC64
+	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+#endif
+	interrupt_enter_prepare(regs);
 #ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
+	 * has to come afterward (it can cause SLB faults).
+	 */
 	if (cpu_has_feature(CPU_FTR_CTRL) &&
 	    !test_thread_local_flags(_TLF_RUNLATCH))
 		__ppc64_runlatch_on();
 #endif
-
-	interrupt_enter_prepare(regs, state);
 	irq_enter();
 }
 
-static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
 {
 	/*
 	 * Adjust at exit so the main handler sees the true NIA. This must
@@ -221,7 +268,7 @@ static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct int
 	nap_adjust_return(regs);
 
 	irq_exit();
-	interrupt_exit_prepare(regs, state);
+	interrupt_exit_prepare(regs);
 }
 
 struct interrupt_nmi_state {
@@ -242,7 +289,7 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs)
 		if (TRAP(regs) == INTERRUPT_PERFMON)
 			return false;
 	}
-	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
 		if (TRAP(regs) == INTERRUPT_PERFMON)
 			return false;
 	}
@@ -276,6 +323,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 		regs->softe = IRQS_ALL_DISABLED;
 	}
 
+	__hard_RI_enable();
+
 	/* Don't do any per-CPU operations until interrupt state is fixed */
 
 	if (nmi_disables_ftrace(regs)) {
@@ -284,22 +333,46 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	}
 #endif
 
+	/* If data relocations are enabled, it's safe to use nmi_enter() */
+	if (mfmsr() & MSR_DR) {
+		nmi_enter();
+		return;
+	}
+
 	/*
-	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
+	 * But do not use nmi_enter() for pseries hash guest taking a real-mode
 	 * NMI because not everything it touches is within the RMA limit.
 	 */
-	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
-	    !firmware_has_feature(FW_FEATURE_LPAR) ||
-	    radix_enabled() || (mfmsr() & MSR_DR))
-		nmi_enter();
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+	    firmware_has_feature(FW_FEATURE_LPAR) &&
+	    !radix_enabled())
+		return;
+
+	/*
+	 * Likewise, don't use it if we have some form of instrumentation (like
+	 * KASAN shadow) that is not safe to access in real mode (even on radix)
+	 */
+	if (IS_ENABLED(CONFIG_KASAN))
+		return;
+
+	/* Otherwise, it should be safe to call it */
+	nmi_enter();
 }
 
 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 {
-	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
-	    !firmware_has_feature(FW_FEATURE_LPAR) ||
-	    radix_enabled() || (mfmsr() & MSR_DR))
+	if (mfmsr() & MSR_DR) {
+		// nmi_exit if relocations are on
+		nmi_exit();
+	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+		   firmware_has_feature(FW_FEATURE_LPAR) &&
+		   !radix_enabled()) {
+		// no nmi_exit for a pseries hash guest taking a real mode exception
+	} else if (IS_ENABLED(CONFIG_KASAN)) {
+		// no nmi_exit for KASAN in real mode
+	} else {
 		nmi_exit();
+	}
 
 	/*
 	 * nmi does not call nap_adjust_return because nmi should not create
@@ -367,19 +440,23 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
  * Specific handlers may have additional restrictions.
  */
 #define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
-static __always_inline long ____##func(struct pt_regs *regs);		\
+static __always_inline __no_sanitize_address __no_kcsan long		\
+____##func(struct pt_regs *regs);					\
									\
 interrupt_handler long func(struct pt_regs *regs)			\
 {									\
	long ret;							\
									\
+	__hard_RI_enable();						\
+									\
	ret = ____##func (regs);					\
									\
	return ret;							\
 }									\
 NOKPROBE_SYMBOL(func);							\
									\
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
 
 /**
  * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
@@ -402,13 +479,11 @@ static __always_inline void ____##func(struct pt_regs *regs);		\
									\
 interrupt_handler void func(struct pt_regs *regs)			\
 {									\
-	struct interrupt_state state;					\
-									\
-	interrupt_enter_prepare(regs, &state);				\
+	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
-	interrupt_exit_prepare(regs, &state);				\
+	interrupt_exit_prepare(regs);					\
 }									\
 NOKPROBE_SYMBOL(func);							\
									\
@@ -437,14 +512,13 @@ static __always_inline long ____##func(struct pt_regs *regs);		\
									\
 interrupt_handler long func(struct pt_regs *regs)			\
 {									\
-	struct interrupt_state state;					\
	long ret;							\
									\
-	interrupt_enter_prepare(regs, &state);				\
+	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
-	interrupt_exit_prepare(regs, &state);				\
+	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
 }									\
@@ -473,13 +547,11 @@ static __always_inline void ____##func(struct pt_regs *regs);		\
									\
 interrupt_handler void func(struct pt_regs *regs)			\
 {									\
-	struct interrupt_state state;					\
-									\
-	interrupt_async_enter_prepare(regs, &state);			\
+	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
-	interrupt_async_exit_prepare(regs, &state);			\
+	interrupt_async_exit_prepare(regs);				\
 }									\
 NOKPROBE_SYMBOL(func);							\
									\
@@ -504,7 +576,8 @@ static __always_inline void ____##func(struct pt_regs *regs)
  * body with a pair of curly brackets.
  */
 #define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
-static __always_inline long ____##func(struct pt_regs *regs);		\
+static __always_inline __no_sanitize_address __no_kcsan long		\
+____##func(struct pt_regs *regs);					\
									\
 interrupt_handler long func(struct pt_regs *regs)			\
 {									\
@@ -521,7 +594,8 @@ interrupt_handler long func(struct pt_regs *regs)		\
 }									\
 NOKPROBE_SYMBOL(func);							\
									\
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
 
 /* Interrupt handlers */
 
@@ -564,10 +638,10 @@ DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
 
 /* slb.c */
 DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
-DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
 
 /* hash_utils.c */
-DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
+DECLARE_INTERRUPT_HANDLER(do_hash_fault);
 
 /* fault.c */
 DECLARE_INTERRUPT_HANDLER(do_page_fault);
@@ -599,6 +673,16 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
 		local_irq_enable();
 }
 
+long system_call_exception(struct pt_regs *regs, unsigned long r0);
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_INTERRUPT_H */
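
Usage note (not part of the commit): callers use the wrapper macros the same way after this change, just without the removed struct interrupt_state argument. A minimal sketch of a synchronous handler built on DEFINE_INTERRUPT_HANDLER follows; the handler name and body are hypothetical and used purely for illustration.

/*
 * Hypothetical example, not from this patch. The outer func() generated by
 * the macro now calls interrupt_enter_prepare(regs) / interrupt_exit_prepare(regs)
 * directly, so the handler body only sees regs.
 */
DECLARE_INTERRUPT_HANDLER(do_example_fault);

DEFINE_INTERRUPT_HANDLER(do_example_fault)
{
	/*
	 * Per the comment added in interrupt_enter_prepare(), a synchronous
	 * interrupt arrives here with MSR[EE] enabled but IRQs still
	 * soft-masked; call interrupt_cond_local_irq_enable() if a regular
	 * process context is wanted.
	 */
	interrupt_cond_local_irq_enable(regs);

	/* ... handle the exception using regs ... */
}

The asynchronous and NMI variants follow the same pattern with DEFINE_INTERRUPT_HANDLER_ASYNC and DEFINE_INTERRUPT_HANDLER_NMI respectively.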