Diffstat (limited to 'arch/powerpc/sysdev/xics')
-rw-r--r--  arch/powerpc/sysdev/xics/icp-opal.c | 66
1 file changed, 57 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..f9670eabfcfa 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
 #include <asm/xics.h>
 #include <asm/io.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 
 static void icp_opal_teardown_cpu(void)
 {
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
 	 * Should we be flagging idle loop instead?
 	 * Or creating some task to be scheduled?
 	 */
-	opal_int_eoi((0x00 << 24) | XICS_IPI);
+	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+		force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+	unsigned int kvm_xirr;
+	__be32 hw_xirr;
+	int64_t rc;
+
+	/* Handle an interrupt latched by KVM first */
+	kvm_xirr = kvmppc_get_xics_latch();
+	if (kvm_xirr)
+		return kvm_xirr;
+
+	/* Then ask OPAL */
+	rc = opal_int_get_xirr(&hw_xirr, false);
+	if (rc < 0)
+		return 0;
+	return be32_to_cpu(hw_xirr);
 }
 
 static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
 	unsigned int xirr;
 	unsigned int vec;
 	unsigned int irq;
-	int64_t rc;
 
-	rc = opal_int_get_xirr(&xirr, false);
-	if (rc < 0)
-		return 0;
-	xirr = be32_to_cpu(xirr);
+	xirr = icp_opal_get_xirr();
 	vec = xirr & 0x00ffffff;
 	if (vec == XICS_IRQ_SPURIOUS)
 		return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
 	xics_mask_unknown_vec(vec);
 
 	/* We might learn about it later, so EOI it */
-	opal_int_eoi(xirr);
+	if (opal_int_eoi(xirr) > 0)
+		force_external_irq_replay();
 
 	return 0;
 }
@@ -103,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
+	kvmppc_set_host_ipi(cpu, 1);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-	int hw_cpu = hard_smp_processor_id();
+	int cpu = smp_processor_id();
 
-	opal_int_set_mfrr(hw_cpu, 0xff);
+	kvmppc_set_host_ipi(cpu, 0);
+	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+	unsigned int xirr;
+	unsigned int vec;
+
+	do {
+		xirr = icp_opal_get_xirr();
+		vec = xirr & 0x00ffffff;
+		if (vec == XICS_IRQ_SPURIOUS)
+			break;
+		if (vec == XICS_IPI) {
+			/* Clear pending IPI */
+			int cpu = smp_processor_id();
+			kvmppc_set_host_ipi(cpu, 0);
+			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+		} else {
+			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+			       "disabling\n", vec);
+			xics_mask_unknown_vec(vec);
+		}
+
+		/* EOI the interrupt */
+	} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
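Note: the same EOI handling appears at three places in this patch (the IPI flush, the unknown-vector path in icp_opal_get_irq(), and the loop condition in icp_opal_flush_interrupt()). The sketch below is illustrative only and not part of the patch; the helper name icp_opal_eoi_and_replay() is hypothetical. It only shows the shared pattern: when opal_int_eoi() returns a positive value, the code forces an external interrupt replay.

/*
 * Illustrative sketch, not from the patch. Hypothetical helper that
 * factors out the EOI-then-replay pattern used at the three
 * opal_int_eoi() call sites above.
 */
#include <asm/opal.h>		/* opal_int_eoi() */
#include <asm/hw_irq.h>		/* force_external_irq_replay() */

static void icp_opal_eoi_and_replay(unsigned int xirr)
{
	/* A positive return is treated as "another interrupt must be replayed" */
	if (opal_int_eoi(xirr) > 0)
		force_external_irq_replay();
}

In the patch itself the check is written inline at each call site rather than wrapped in a helper; the wrapper above merely makes the repeated pattern easier to see.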