diff options
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/irqchip/irq-mips-gic.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/irqchip/irq-mips-gic.c')
| -rw-r--r-- | drivers/irqchip/irq-mips-gic.c | 58 | 
1 file changed, 17 insertions, 41 deletions
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 1a6a7a672ad7..6d5ecc10a870 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -50,11 +50,10 @@ void __iomem *mips_gic_base;  static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static DEFINE_SPINLOCK(gic_lock); +static DEFINE_RAW_SPINLOCK(gic_lock);  static struct irq_domain *gic_irq_domain;  static int gic_shared_intrs;  static unsigned int gic_cpu_pin; -static unsigned int timer_cpu_pin;  static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;  #ifdef CONFIG_GENERIC_IRQ_IPI @@ -211,7 +210,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)  	irq = GIC_HWIRQ_TO_SHARED(d->hwirq); -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	switch (type & IRQ_TYPE_SENSE_MASK) {  	case IRQ_TYPE_EDGE_FALLING:  		pol = GIC_POL_FALLING_EDGE; @@ -251,7 +250,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)  	else  		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,  						 handle_level_irq, NULL); -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  	return 0;  } @@ -269,7 +268,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,  		return -EINVAL;  	/* Assumption : cpumask refers to a single CPU */ -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	/* Re-route this IRQ */  	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); @@ -280,7 +279,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,  		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));  	irq_data_update_effective_affinity(d, cpumask_of(cpu)); -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  	return IRQ_SET_MASK_OK;  } @@ -358,12 +357,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)  	cd = 
irq_data_get_irq_chip_data(d);  	cd->mask = false; -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	for_each_online_cpu(cpu) {  		write_gic_vl_other(mips_cm_vp_id(cpu));  		write_gic_vo_rmask(BIT(intr));  	} -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  }  static void gic_unmask_local_irq_all_vpes(struct irq_data *d) @@ -376,12 +375,12 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)  	cd = irq_data_get_irq_chip_data(d);  	cd->mask = true; -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	for_each_online_cpu(cpu) {  		write_gic_vl_other(mips_cm_vp_id(cpu));  		write_gic_vo_smask(BIT(intr));  	} -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  }  static void gic_all_vpes_irq_cpu_online(void) @@ -394,19 +393,21 @@ static void gic_all_vpes_irq_cpu_online(void)  	unsigned long flags;  	int i; -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {  		unsigned int intr = local_intrs[i];  		struct gic_all_vpes_chip_data *cd; +		if (!gic_local_irq_is_routable(intr)) +			continue;  		cd = &gic_all_vpes_chip_data[intr];  		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);  		if (cd->mask)  			write_gic_vl_smask(BIT(intr));  	} -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  }  static struct irq_chip gic_all_vpes_local_irq_controller = { @@ -436,11 +437,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,  	data = irq_get_irq_data(virq); -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);  	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));  	irq_data_update_effective_affinity(data, cpumask_of(cpu)); -	spin_unlock_irqrestore(&gic_lock, flags); +	
raw_spin_unlock_irqrestore(&gic_lock, flags);  	return 0;  } @@ -499,9 +500,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,  	 */  	switch (intr) {  	case GIC_LOCAL_INT_TIMER: -		/* CONFIG_MIPS_CMP workaround (see __gic_init) */ -		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; -		fallthrough;  	case GIC_LOCAL_INT_PERFCTR:  	case GIC_LOCAL_INT_FDC:  		/* @@ -535,12 +533,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,  	if (!gic_local_irq_is_routable(intr))  		return -EPERM; -	spin_lock_irqsave(&gic_lock, flags); +	raw_spin_lock_irqsave(&gic_lock, flags);  	for_each_online_cpu(cpu) {  		write_gic_vl_other(mips_cm_vp_id(cpu));  		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);  	} -	spin_unlock_irqrestore(&gic_lock, flags); +	raw_spin_unlock_irqrestore(&gic_lock, flags);  	return 0;  } @@ -795,34 +793,12 @@ static int __init gic_of_init(struct device_node *node,  	if (cpu_has_veic) {  		/* Always use vector 1 in EIC mode */  		gic_cpu_pin = 0; -		timer_cpu_pin = gic_cpu_pin;  		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,  			       __gic_irq_dispatch);  	} else {  		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;  		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,  					gic_irq_dispatch); -		/* -		 * With the CMP implementation of SMP (deprecated), other CPUs -		 * are started by the bootloader and put into a timer based -		 * waiting poll loop. We must not re-route those CPU's local -		 * timer interrupts as the wait instruction will never finish, -		 * so just handle whatever CPU interrupt it is routed to by -		 * default. -		 * -		 * This workaround should be removed when CMP support is -		 * dropped. 
-		 */ -		if (IS_ENABLED(CONFIG_MIPS_CMP) && -		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { -			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; -			irq_set_chained_handler(MIPS_CPU_IRQ_BASE + -						GIC_CPU_PIN_OFFSET + -						timer_cpu_pin, -						gic_irq_dispatch); -		} else { -			timer_cpu_pin = gic_cpu_pin; -		}  	}  	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +  |