-rw-r--r-- | Documentation/arm64/silicon-errata.rst |  4
-rw-r--r-- | drivers/irqchip/irq-gic-v3-its.c       |  2
-rw-r--r-- | drivers/irqchip/irq-gic-v3.c           | 45
-rw-r--r-- | include/linux/irq.h                    | 59
-rw-r--r-- | kernel/irq/chip.c                      | 16
-rw-r--r-- | kernel/irq/debugfs.c                   |  2
-rw-r--r-- | kernel/irq/internals.h                 |  7
7 files changed, 99 insertions(+), 36 deletions(-)
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 9e311bc43e05..d6430ade349d 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -214,3 +214,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
+
++----------------+-----------------+-----------------+-----------------------------+
+| ASR            | ASR8601         | #8601001        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0ec2b1e1df75..1994541eaef8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 		irqd = irq_get_irq_data(virq + i);
 		irqd_set_single_target(irqd);
 		irqd_set_affinity_on_activate(irqd);
+		irqd_set_resend_when_in_progress(irqd);
 		pr_debug("ID:%d pID:%d vID:%d\n",
 			 (int)(hwirq + i - its_dev->event_map.lpi_base),
 			 (int)(hwirq + i), virq + i);
@@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
 					      irqchip, vm->vpes[i]);
 		set_bit(i, bitmap);
+		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
 	}
 
 	if (err) {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index a605aa79435a..0c6c1af9a5b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -40,6 +40,7 @@
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
 #define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 3)
 
 #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
 
@@ -656,10 +657,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 	return 0;
 }
 
-static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+static u64 gic_cpu_to_affinity(int cpu)
 {
+	u64 mpidr = cpu_logical_map(cpu);
 	u64 aff;
 
+	/* ASR8601 needs to have its affinities shifted down... */
+	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
 	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -914,7 +921,7 @@ static void __init gic_dist_init(void)
 	 * Set all global interrupts to the boot CPU only. ARE must be
 	 * enabled.
 	 */
-	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+	affinity = gic_cpu_to_affinity(smp_processor_id());
 	for (i = 32; i < GIC_LINE_NR; i++)
 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 
@@ -963,7 +970,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 
 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 {
-	unsigned long mpidr = cpu_logical_map(smp_processor_id());
+	unsigned long mpidr;
 	u64 typer;
 	u32 aff;
 
@@ -971,6 +978,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	 * Convert affinity to a 32bit value that can be matched to
 	 * GICR_TYPER bits [63:32].
 	 */
+	mpidr = gic_cpu_to_affinity(smp_processor_id());
+
 	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -1084,7 +1093,7 @@ static inline bool gic_dist_security_disabled(void)
 static void gic_cpu_sys_reg_init(void)
 {
 	int i, cpu = smp_processor_id();
-	u64 mpidr = cpu_logical_map(cpu);
+	u64 mpidr = gic_cpu_to_affinity(cpu);
 	u64 need_rss = MPIDR_RS(mpidr);
 	bool group0;
 	u32 pribits;
@@ -1183,11 +1192,11 @@ static void gic_cpu_sys_reg_init(void)
 	for_each_online_cpu(i) {
 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
 
-		need_rss |= MPIDR_RS(cpu_logical_map(i));
+		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
 		if (need_rss && (!have_rss))
 			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
 				cpu, (unsigned long)mpidr,
-				i, (unsigned long)cpu_logical_map(i));
+				i, (unsigned long)gic_cpu_to_affinity(i));
 	}
 
 	/**
@@ -1263,9 +1272,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
 	int next_cpu, cpu = *base_cpu;
-	unsigned long mpidr = cpu_logical_map(cpu);
+	unsigned long mpidr;
 	u16 tlist = 0;
 
+	mpidr = gic_cpu_to_affinity(cpu);
+
 	while (cpu < nr_cpu_ids) {
 		tlist |= 1 << (mpidr & 0xf);
 
@@ -1274,7 +1285,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 			goto out;
 		cpu = next_cpu;
 
-		mpidr = cpu_logical_map(cpu);
+		mpidr = gic_cpu_to_affinity(cpu);
 
 		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
 			cpu--;
@@ -1319,7 +1330,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	dsb(ishst);
 
 	for_each_cpu(cpu, mask) {
-		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
 		u16 tlist;
 
 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1377,7 +1388,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 	offset = convert_offset_index(d, GICD_IROUTER, &index);
 	reg = gic_dist_base(d) + offset + (index * 8);
-	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+	val = gic_cpu_to_affinity(cpu);
 
 	gic_write_irouter(val, reg);
 
@@ -1796,6 +1807,15 @@ static bool gic_enable_quirk_nvidia_t241(void *data)
 	return true;
 }
 
+static bool gic_enable_quirk_asr8601(void *data)
+{
+	struct gic_chip_data *d = data;
+
+	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
+
+	return true;
+}
+
 static const struct gic_quirk gic_quirks[] = {
 	{
 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
@@ -1803,6 +1823,11 @@ static const struct gic_quirk gic_quirks[] = {
 		.init	= gic_enable_quirk_msm8996,
 	},
 	{
+		.desc	= "GICv3: ASR erratum 8601001",
+		.compatible = "asr,asr8601-gic-v3",
+		.init	= gic_enable_quirk_asr8601,
+	},
+	{
 		.desc	= "GICv3: Mediatek Chromebook GICR save problem",
 		.property = "mediatek,broken-save-restore-fw",
 		.init	= gic_enable_quirk_mtk_gicr,
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b1b28affb32a..d8a6fdce9373 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -223,32 +223,35 @@ struct irq_data {
  *				  irq_chip::irq_set_affinity() when deactivated.
  * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
  *				  irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS	- Interrupt may fire when already in progress in which
+ *				  case it must be resent at the next available opportunity.
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
-	IRQD_SETAFFINITY_PENDING	= (1 << 8),
-	IRQD_ACTIVATED			= (1 << 9),
-	IRQD_NO_BALANCING		= (1 << 10),
-	IRQD_PER_CPU			= (1 << 11),
-	IRQD_AFFINITY_SET		= (1 << 12),
-	IRQD_LEVEL			= (1 << 13),
-	IRQD_WAKEUP_STATE		= (1 << 14),
-	IRQD_MOVE_PCNTXT		= (1 << 15),
-	IRQD_IRQ_DISABLED		= (1 << 16),
-	IRQD_IRQ_MASKED			= (1 << 17),
-	IRQD_IRQ_INPROGRESS		= (1 << 18),
-	IRQD_WAKEUP_ARMED		= (1 << 19),
-	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
-	IRQD_AFFINITY_MANAGED		= (1 << 21),
-	IRQD_IRQ_STARTED		= (1 << 22),
-	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
-	IRQD_SINGLE_TARGET		= (1 << 24),
-	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
-	IRQD_CAN_RESERVE		= (1 << 26),
-	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
-	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
-	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
-	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
+	IRQD_SETAFFINITY_PENDING	= BIT(8),
+	IRQD_ACTIVATED			= BIT(9),
+	IRQD_NO_BALANCING		= BIT(10),
+	IRQD_PER_CPU			= BIT(11),
+	IRQD_AFFINITY_SET		= BIT(12),
+	IRQD_LEVEL			= BIT(13),
+	IRQD_WAKEUP_STATE		= BIT(14),
+	IRQD_MOVE_PCNTXT		= BIT(15),
+	IRQD_IRQ_DISABLED		= BIT(16),
+	IRQD_IRQ_MASKED			= BIT(17),
+	IRQD_IRQ_INPROGRESS		= BIT(18),
+	IRQD_WAKEUP_ARMED		= BIT(19),
+	IRQD_FORWARDED_TO_VCPU		= BIT(20),
+	IRQD_AFFINITY_MANAGED		= BIT(21),
+	IRQD_IRQ_STARTED		= BIT(22),
+	IRQD_MANAGED_SHUTDOWN		= BIT(23),
+	IRQD_SINGLE_TARGET		= BIT(24),
+	IRQD_DEFAULT_TRIGGER_SET	= BIT(25),
+	IRQD_CAN_RESERVE		= BIT(26),
+	IRQD_MSI_NOMASK_QUIRK		= BIT(27),
+	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(28),
+	IRQD_AFFINITY_ON_ACTIVATE	= BIT(29),
+	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(30),
+	IRQD_RESEND_WHEN_IN_PROGRESS	= BIT(31),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
 }
 
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 49e7bc871fec..57cd8f475302 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -692,8 +692,16 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	/*
+	 * When an affinity change races with IRQ handling, the next interrupt
+	 * can arrive on the new CPU before the original CPU has completed
+	 * handling the previous one - it may need to be resent.
+	 */
+	if (!irq_may_run(desc)) {
+		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
+			desc->istate |= IRQS_PENDING;
 		goto out;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -715,6 +723,12 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	cond_unmask_eoi_irq(desc, chip);
 
+	/*
+	 * When the race described above happens this will resend the interrupt.
+	 */
+	if (unlikely(desc->istate & IRQS_PENDING))
+		check_irq_resend(desc, false);
+
 	raw_spin_unlock(&desc->lock);
 	return;
 out:
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index bbcaac64038e..5971a66be034 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = {
 	BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
 
 	BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
+
+	BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
 };
 
 static const struct irq_bit_descr irqdesc_states[] = {
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 5fdc0b557579..c443a0ddc07e 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -47,9 +47,12 @@ enum {
  *				  detection
  * IRQS_POLL_INPROGRESS		- polling in progress
  * IRQS_ONESHOT			- irq is not unmasked in primary handler
- * IRQS_REPLAY			- irq is replayed
+ * IRQS_REPLAY			- irq has been resent and will not be resent
+ *				  again until the handler has run and cleared
+ *				  this flag.
  * IRQS_WAITING			- irq is waiting
- * IRQS_PENDING			- irq is pending and replayed later
+ * IRQS_PENDING			- irq needs to be resent and should be resent
+ *				  at the next available opportunity.
  * IRQS_SUSPENDED		- irq is suspended
 * IRQS_NMI			- irq line is used to deliver NMIs
 * IRQS_SYSFS			- descriptor has been added to sysfs
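As background on the gic_cpu_to_affinity() change above: when FLAGS_WORKAROUND_ASR_ERRATUM_8601001 is set, the MPIDR is rebuilt from affinity levels 1 and 2 before being packed into the routing value, i.e. the reported topology is shifted down by one level. The standalone userspace sketch below mirrors only that packing so the effect is easy to see; the mpidr_aff() helper, the cpu_to_affinity() wrapper and the example MPIDR value are simplified stand-ins of my own, not code from this patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the kernel's MPIDR_AFFINITY_LEVEL():
 * Aff0..Aff2 live in bits [23:0], Aff3 in bits [39:32].
 */
static uint64_t mpidr_aff(uint64_t mpidr, int level)
{
	int shift = (level == 3) ? 32 : level * 8;

	return (mpidr >> shift) & 0xff;
}

/*
 * Mirror of the packing done by gic_cpu_to_affinity(): optionally squash
 * the topology down one level (Aff1 -> Aff0, Aff2 -> Aff1), then build
 * the routing affinity value.
 */
static uint64_t cpu_to_affinity(uint64_t mpidr, bool asr8601_quirk)
{
	if (asr8601_quirk)
		mpidr = mpidr_aff(mpidr, 1) | (mpidr_aff(mpidr, 2) << 8);

	return (mpidr_aff(mpidr, 3) << 32) |
	       (mpidr_aff(mpidr, 2) << 16) |
	       (mpidr_aff(mpidr, 1) << 8)  |
	        mpidr_aff(mpidr, 0);
}

int main(void)
{
	/* hypothetical MPIDR: Aff2 = 2 (cluster), Aff1 = 1 (core), Aff0 = 0 */
	uint64_t mpidr = 0x00020100;

	printf("plain: 0x%llx\n",
	       (unsigned long long)cpu_to_affinity(mpidr, false)); /* 0x20100 */
	printf("quirk: 0x%llx\n",
	       (unsigned long long)cpu_to_affinity(mpidr, true));  /* 0x201 */
	return 0;
}
```

The squashing matters most for SGIs: gic_compute_target_list() addresses cores within a cluster through the low 4 bits of this value (tlist |= 1 << (mpidr & 0xf)), so the per-core number has to end up in Aff0 of the routing affinity for the target list to be built correctly.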