Diffstat (limited to 'arch/powerpc/sysdev/xive/common.c')
-rw-r--r--	arch/powerpc/sysdev/xive/common.c | 90
1 file changed, 71 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6595462b1fc8..f387318678b9 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -40,7 +40,8 @@
 #undef DEBUG_ALL
 
 #ifdef DEBUG_ALL
-#define DBG_VERBOSE(fmt...)	pr_devel(fmt)
+#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
+					 smp_processor_id(), ## __VA_ARGS__)
 #else
 #define DBG_VERBOSE(fmt...)	do { } while(0)
 #endif
@@ -190,7 +191,7 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
  * This is used to perform the magic loads from an ESB
  * described in xive.h
  */
-static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
+static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
 {
 	u64 val;
 
@@ -198,13 +199,28 @@ static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
 	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
 		offset |= offset << 4;
 
-	val = in_be64(xd->eoi_mmio + offset);
+	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
+		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
+	else
+		val = in_be64(xd->eoi_mmio + offset);
 
 	return (u8)val;
 }
 
+static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
+{
+	/* Handle HW errata */
+	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+		offset |= offset << 4;
+
+	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
+		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
+	else
+		out_be64(xd->eoi_mmio + offset, data);
+}
+
 #ifdef CONFIG_XMON
-static void xive_dump_eq(const char *name, struct xive_q *q)
+static notrace void xive_dump_eq(const char *name, struct xive_q *q)
 {
 	u32 i0, i1, idx;
 
@@ -218,7 +234,7 @@ static void xive_dump_eq(const char *name, struct xive_q *q)
 		    q->toggle, i0, i1);
 }
 
-void xmon_xive_do_dump(int cpu)
+notrace void xmon_xive_do_dump(int cpu)
 {
 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
 
@@ -227,7 +243,7 @@ void xmon_xive_do_dump(int cpu)
 	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
 #ifdef CONFIG_SMP
 	{
-		u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);
+		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
 		xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
 			val & XIVE_ESB_VAL_P ? 'P' : 'p',
 			val & XIVE_ESB_VAL_P ? 'Q' : 'q');
@@ -297,7 +313,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
+		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
@@ -326,10 +342,10 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 		 * properly.
 		 */
 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
-			in_be64(xd->eoi_mmio);
+			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
 		else {
-			eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
-			DBG_VERBOSE("eoi_val=%x\n", offset, eoi_val);
+			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
+			DBG_VERBOSE("eoi_val=%x\n", eoi_val);
 
 			/* Re-trigger if needed */
 			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
@@ -383,12 +399,12 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd,
 	 * ESB accordingly on unmask.
 	 */
 	if (mask) {
-		val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
+		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
 		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
 	} else if (xd->saved_p)
-		xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
 	else
-		xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
+		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
 }
 
 /*
@@ -447,7 +463,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 	int cpu, first, num, i;
 
 	/* Pick up a starting point CPU in the mask based on  fuzz */
-	num = cpumask_weight(mask);
+	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
 	first = fuzz % num;
 
 	/* Locate it */
@@ -672,6 +688,10 @@ static int xive_irq_set_affinity(struct irq_data *d,
 	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
 		return -EINVAL;
 
+	/* Don't do anything if the interrupt isn't started */
+	if (!irqd_is_started(d))
+		return IRQ_SET_MASK_OK;
+
 	/*
 	 * If existing target is already in the new mask, and is
 	 * online then do nothing.
@@ -768,7 +788,7 @@ static int xive_irq_retrigger(struct irq_data *d)
 	 * To perform a retrigger, we first set the PQ bits to
 	 * 11, then perform an EOI.
 	 */
-	xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
 
 	/*
 	 * Note: We pass "0" to the hw_irq argument in order to
@@ -803,7 +823,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 		irqd_set_forwarded_to_vcpu(d);
 
 		/* Set it to PQ=10 state to prevent further sends */
-		pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
 
 		/* No target ? nothing to do */
 		if (xd->target == XIVE_INVALID_TARGET) {
@@ -832,7 +852,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 		 * for sure the queue slot is no longer in use.
 		 */
 		if (pq & 2) {
-			pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+			pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
 			xd->saved_p = true;
 
 			/*
@@ -989,6 +1009,9 @@ static void xive_ipi_eoi(struct irq_data *d)
 {
 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 
+	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
+		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
+
 	/* Handle possible race with unplug and drop stale IPIs */
 	if (!xc)
 		return;
@@ -1368,6 +1391,19 @@ void xive_flush_interrupt(void)
 
 #endif /* CONFIG_SMP */
 
+void xive_teardown_cpu(void)
+{
+	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+	unsigned int cpu = smp_processor_id();
+
+	/* Set CPPR to 0 to disable flow of interrupts */
+	xc->cppr = 0;
+	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
+
+	if (xive_ops->teardown_cpu)
+		xive_ops->teardown_cpu(cpu, xc);
+}
+
 void xive_kexec_teardown_cpu(int secondary)
 {
 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
@@ -1395,8 +1431,8 @@ void xive_shutdown(void)
 	xive_ops->shutdown();
 }
 
-bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
-		    u8 max_prio)
+bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
+			   u8 max_prio)
 {
 	xive_tima = area;
 	xive_tima_offset = offset;
@@ -1424,6 +1460,22 @@ bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
 	return true;
 }
 
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
+{
+	unsigned int alloc_order;
+	struct page *pages;
+	__be32 *qpage;
+
+	alloc_order = xive_alloc_order(queue_shift);
+	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+	qpage = (__be32 *)page_address(pages);
+	memset(qpage, 0, 1 << queue_shift);
+
+	return qpage;
+}
+
 static int __init xive_off(char *arg)
 {
 	xive_cmdline_disabled = true;
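
The core of this patch is the xive_esb_read()/xive_esb_write() pair, which lets every ESB access take either the direct MMIO path or a firmware-mediated path (xive_ops->esb_rw) when the platform flags the source with XIVE_IRQ_FLAG_H_INT_ESB. As a rough standalone illustration of that dispatch pattern, here is a compilable userspace mock; the flag values, the in_be64()/out_be64() stand-ins and the fake_hcall backend are invented for the sketch and are not the kernel's definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Stub flag values for illustration only; the real ones live in asm/xive.h */
	#define XIVE_IRQ_FLAG_H_INT_ESB 0x01
	#define XIVE_IRQ_FLAG_SHIFT_BUG 0x02

	struct xive_irq_data {
		uint64_t flags;
		uint32_t hw_irq;
		uint8_t *eoi_mmio;	/* stands in for the __iomem ESB mapping */
	};

	/* Mock accessors standing in for the kernel's in_be64()/out_be64() */
	static uint64_t in_be64(const uint8_t *addr)
	{
		return *(const volatile uint64_t *)(const void *)addr;
	}

	static void out_be64(uint8_t *addr, uint64_t val)
	{
		*(volatile uint64_t *)(void *)addr = val;
	}

	/* Optional firmware backend, playing the role of xive_ops->esb_rw */
	static uint64_t (*esb_rw)(uint32_t hw_irq, uint32_t offset,
				  uint64_t data, int write);

	static uint8_t xive_esb_read(struct xive_irq_data *xd, uint32_t offset)
	{
		uint64_t val;

		/* Handle HW errata: mirror the offset into the next nibble */
		if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
			offset |= offset << 4;

		/* Route through firmware when MMIO can't be used for this source */
		if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && esb_rw)
			val = esb_rw(xd->hw_irq, offset, 0, 0);
		else
			val = in_be64(xd->eoi_mmio + offset);

		return (uint8_t)val;
	}

	static void xive_esb_write(struct xive_irq_data *xd, uint32_t offset,
				   uint64_t data)
	{
		if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
			offset |= offset << 4;

		if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && esb_rw)
			esb_rw(xd->hw_irq, offset, data, 1);
		else
			out_be64(xd->eoi_mmio + offset, data);
	}

	/* Fake hypervisor call so the mock can demonstrate both paths */
	static uint64_t fake_hcall(uint32_t hw_irq, uint32_t offset,
				   uint64_t data, int write)
	{
		printf("hcall: irq=%u offset=0x%x data=0x%llx write=%d\n",
		       hw_irq, offset, (unsigned long long)data, write);
		return 0;
	}

	int main(void)
	{
		static uint8_t esb_page[4096];	/* pretend ESB page, zero-filled */
		struct xive_irq_data xd = { .flags = 0, .hw_irq = 42,
					    .eoi_mmio = esb_page };

		/* Direct MMIO path (native hardware access) */
		printf("mmio read: 0x%x\n", xive_esb_read(&xd, 0x800));

		/* Firmware-mediated path, as when XIVE_IRQ_FLAG_H_INT_ESB is set */
		esb_rw = fake_hcall;
		xd.flags = XIVE_IRQ_FLAG_H_INT_ESB;
		xive_esb_write(&xd, 0x400, 0);
		return 0;
	}

The point of the pattern is that callers such as xive_do_source_eoi() and xive_irq_retrigger() never need to know which transport is in use; they express intent through ESB offsets (XIVE_ESB_SET_PQ_00/10/11, XIVE_ESB_STORE_EOI, XIVE_ESB_LOAD_EOI) and the accessor picks the path per source.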