Diffstat

-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  164
1 file changed, 123 insertions(+), 41 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 21cc571ea9c2..77fa88c2aed0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -61,7 +61,7 @@
 
 static inline void __hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
 		wrtee(MSR_EE);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
 		wrtspr(SPRN_EIE);
@@ -73,7 +73,7 @@ static inline void __hard_irq_enable(void)
 
 static inline void __hard_irq_disable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
 		wrtee(0);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
 		wrtspr(SPRN_EID);
@@ -85,7 +85,7 @@ static inline void __hard_irq_disable(void)
 
 static inline void __hard_EE_RI_disable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
 		wrtee(0);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
 		wrtspr(SPRN_NRI);
@@ -97,7 +97,7 @@ static inline void __hard_EE_RI_disable(void)
 
 static inline void __hard_RI_enable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
 		return;
 
 	if (IS_ENABLED(CONFIG_PPC_8xx))
@@ -130,7 +130,6 @@ static inline notrace unsigned long irq_soft_mask_return(void)
  */
 static inline notrace void irq_soft_mask_set(unsigned long mask)
 {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
 	/*
 	 * The irq mask must always include the STD bit if any are set.
 	 *
@@ -145,8 +144,8 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
 	 * unmasks to be replayed, among other things. For now, take
 	 * the simple approach.
 	 */
-	WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON(mask && !(mask & IRQS_DISABLED));
 
 	asm volatile(
 		"stb %0,%1(13)"
@@ -158,36 +157,18 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
 
 static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
 {
-	unsigned long flags;
+	unsigned long flags = irq_soft_mask_return();
 
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-	WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
-
-	asm volatile(
-		"lbz %0,%1(13); stb %2,%1(13)"
-		: "=&r" (flags)
-		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
-		  "r" (mask)
-		: "memory");
+	irq_soft_mask_set(mask);
 
 	return flags;
 }
 
 static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 {
-	unsigned long flags, tmp;
-
-	asm volatile(
-		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
-		: "=&r" (flags), "=r" (tmp)
-		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
-		  "r" (mask)
-		: "memory");
+	unsigned long flags = irq_soft_mask_return();
 
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
-#endif
+	irq_soft_mask_set(flags | mask);
 
 	return flags;
 }
@@ -224,6 +205,42 @@ static inline bool arch_irqs_disabled(void)
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
+static inline void set_pmi_irq_pending(void)
+{
+	/*
+	 * Invoked from PMU callback functions to set PMI bit in the paca.
+	 * This has to be called with irq's disabled (via hard_irq_disable()).
+	 */
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	get_paca()->irq_happened |= PACA_IRQ_PMI;
+}
+
+static inline void clear_pmi_irq_pending(void)
+{
+	/*
+	 * Invoked from PMU callback functions to clear the pending PMI bit
+	 * in the paca.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
+}
+
+static inline bool pmi_irq_pending(void)
+{
+	/*
+	 * Invoked from PMU callback functions to check if there is a pending
+	 * PMI bit in the paca.
+	 */
+	if (get_paca()->irq_happened & PACA_IRQ_PMI)
+		return true;
+
+	return false;
+}
+
 #ifdef CONFIG_PPC_BOOK3S
 /*
  * To support disabling and enabling of irq with PMI, set of
@@ -276,9 +293,8 @@ static inline bool arch_irqs_disabled(void)
 		flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);	\
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;		\
 		if (!arch_irqs_disabled_flags(flags)) {			\
-			asm ("stdx %%r1, 0, %1 ;"			\
-				: "=m" (local_paca->saved_r1)		\
-				: "b" (&local_paca->saved_r1));		\
+			asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
+				     : "r" (current_stack_pointer));	\
 			trace_hardirqs_off();				\
 		}							\
 	} while(0)
@@ -306,18 +322,56 @@ static inline bool lazy_irq_pending_nocheck(void)
 	return __lazy_irq_pending(local_paca->irq_happened);
 }
 
+bool power_pmu_wants_prompt_pmi(void);
+
 /*
- * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts after having cleared the source
- * of the interrupt. They are kept disabled if there is a different
- * soft-masked interrupt pending that requires hard masking.
+ * This is called by asynchronous interrupts to check whether to
+ * conditionally re-enable hard interrupts after having cleared
+ * the source of the interrupt. They are kept disabled if there
+ * is a different soft-masked interrupt pending that requires hard
+ * masking.
  */
-static inline void may_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(void)
 {
-	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
-		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-		__hard_irq_enable();
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(mfmsr() & MSR_EE);
 	}
+
+	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+		return false;
+	/*
+	 * If the PMU is not running, there is not much reason to enable
+	 * MSR[EE] in irq handlers because any interrupts would just be
+	 * soft-masked.
+	 *
+	 * TODO: Add test for 64e
+	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+		return false;
+
+	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+		return false;
+
+	return true;
+}
+
+/*
+ * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ */
+static inline void do_hard_irq_enable(void)
+{
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+		WARN_ON(mfmsr() & MSR_EE);
+	}
+	/*
+	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
+	 * There is no other reason to enable this way.
+	 */
+	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	__hard_irq_enable();
 }
 
 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@@ -398,7 +452,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static inline bool may_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(void)
 {
 	return false;
 }
@@ -408,11 +462,39 @@ static inline void do_hard_irq_enable(void)
 	BUILD_BUG();
 }
 
+static inline void clear_pmi_irq_pending(void) { }
+static inline void set_pmi_irq_pending(void) { }
+static inline bool pmi_irq_pending(void) { return false; }
+
 static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
 {
 }
 #endif /* CONFIG_PPC64 */
 
+static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
+{
+#ifdef CONFIG_PPC64
+	if (arch_irqs_disabled()) {
+		/*
+		 * With soft-masking, MSR[EE] can change from 1 to 0
+		 * asynchronously when irqs are disabled, and we don't want to
+		 * set MSR[EE] back to 1 here if that has happened. A race-free
+		 * way to do this is ensure EE is already 0. Another way it
+		 * could be done is with a RESTART_TABLE handler, but that's
+		 * probably overkill here.
+		 */
+		msr &= ~MSR_EE;
+		mtmsr_isync(msr);
+		irq_soft_mask_set(IRQS_ALL_DISABLED);
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+	} else
+#endif
+		mtmsr_isync(msr);
+
+	return msr;
+}
+
+
 #define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST
 
 #endif /* __ASSEMBLY__ */
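For orientation, here is a minimal sketch of how an asynchronous interrupt handler is expected to use the split API above, replacing the old single may_hard_irq_enable() call. The handler name and body are hypothetical and not part of this diff; only should_hard_irq_enable() and do_hard_irq_enable() come from the patched header, and the example assumes a kernel context where <asm/hw_irq.h> is included.

/* Hypothetical async interrupt handler body, for illustration only. */
static void example_async_interrupt(struct pt_regs *regs)
{
	/*
	 * Hard interrupts (MSR[EE]) are still disabled on entry. Only turn
	 * them back on when the check says it is both worthwhile (perf is
	 * in use, so PMIs should be taken promptly) and safe (no pending
	 * soft-masked interrupt that must stay hard-masked).
	 */
	if (should_hard_irq_enable())
		do_hard_irq_enable();

	/* ... clear the interrupt source and do the real work ... */
}

Splitting the decision (should_hard_irq_enable()) from the action (do_hard_irq_enable()) lets handlers skip the MSR[EE] write entirely when the PMU is idle, which is what the power_pmu_wants_prompt_pmi() test in the diff is for.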