diff options
Diffstat (limited to 'arch/powerpc/include/asm/hw_irq.h')
| -rw-r--r-- | arch/powerpc/include/asm/hw_irq.h | 43 | 
1 file changed, 30 insertions, 13 deletions
| diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 77fa88c2aed0..eb6d094083fd 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)  	return flags;  } +static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask) +{ +	unsigned long flags = irq_soft_mask_return(); + +	irq_soft_mask_set(flags & ~mask); + +	return flags; +} +  static inline unsigned long arch_local_save_flags(void)  {  	return irq_soft_mask_return(); @@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)  static inline unsigned long arch_local_irq_save(void)  { -	return irq_soft_mask_set_return(IRQS_DISABLED); +	return irq_soft_mask_or_return(IRQS_DISABLED);  }  static inline bool arch_irqs_disabled_flags(unsigned long flags) @@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);   * is a different soft-masked interrupt pending that requires hard   * masking.   */ -static inline bool should_hard_irq_enable(void) +static inline bool should_hard_irq_enable(struct pt_regs *regs)  {  	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { -		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED); +		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); +		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));  		WARN_ON(mfmsr() & MSR_EE);  	} @@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)  	 *  	 * TODO: Add test for 64e  	 */ -	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi()) -		return false; +	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { +		if (!power_pmu_wants_prompt_pmi()) +			return false; +		/* +		 * If PMIs are disabled then IRQs should be disabled as well, +		 * so we shouldn't see this condition, check for it just in +		 * case because we are about to enable PMIs. 
+		 */ +		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED)) +			return false; +	}  	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)  		return false; @@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)  /*   * Do the hard enabling, only call this if should_hard_irq_enable is true. + * This allows PMI interrupts to profile irq handlers.   */  static inline void do_hard_irq_enable(void)  { -	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { -		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED); -		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK); -		WARN_ON(mfmsr() & MSR_EE); -	}  	/* -	 * This allows PMI interrupts (and watchdog soft-NMIs) through. -	 * There is no other reason to enable this way. +	 * Asynch interrupts come in with IRQS_ALL_DISABLED, +	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.  	 */ +	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) +		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);  	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;  	__hard_irq_enable();  } @@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)  	return !(regs->msr & MSR_EE);  } -static __always_inline bool should_hard_irq_enable(void) +static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)  {  	return false;  } |