| author | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 |
|---|---|---|
| committer | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 |
| commit | c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch) | |
| tree | f2690a1a916b73ef94657fbf0e0141ae57701825 /kernel/irq/manage.c | |
| parent | 6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff) | |
| parent | 500775074f88d9cf5416bed2ca19592812d62c41 (diff) | |
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other
i915 changes to dinq that could break compilation.
Signed-off-by: Rodrigo Vivi <[email protected]>
Diffstat (limited to 'kernel/irq/manage.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/irq/manage.c | 38 |

1 file changed, 36 insertions(+), 2 deletions(-)
```diff
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e3336d904f64..daeabd791d58 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -24,6 +24,7 @@
 #ifdef CONFIG_IRQ_FORCED_THREADING
 __read_mostly bool force_irqthreads;
+EXPORT_SYMBOL_GPL(force_irqthreads);
 
 static int __init setup_forced_irqthreads(char *arg)
 {
@@ -204,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	irqd_set_move_pending(data);
+	irq_copy_pending(desc, dest);
+	return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	int ret = irq_do_set_affinity(data, dest, force);
+
+	/*
+	 * In case that the underlying vector management is busy and the
+	 * architecture supports the generic pending mechanism then utilize
+	 * this to avoid returning an error to user space.
+	 */
+	if (ret == -EBUSY && !force)
+		ret = irq_set_affinity_pending(data, dest);
+	return ret;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -214,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, force);
+	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+		ret = irq_try_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
```
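For context on the behavioral change in this file: writes to an interrupt's affinity mask through procfs eventually reach irq_set_affinity_locked(), so a request that previously could fail while the architecture's vector management was busy is now queued as a pending move instead of returning -EBUSY to user space (when CONFIG_GENERIC_PENDING_IRQ is available and the change is not forced). The sketch below is illustrative only and not part of the patch; it assumes root privileges on a Linux system, and the default IRQ number and CPU mask are placeholders supplied for demonstration.

```c
/*
 * Minimal user-space sketch (assumption: run as root on Linux, with a
 * movable interrupt). Writing a hex CPU mask to /proc/irq/<N>/smp_affinity
 * exercises the irq_set_affinity_locked() path changed above.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	/* IRQ number and hex CPU mask are illustrative; pass your own. */
	const char *irq  = argc > 1 ? argv[1] : "30";
	const char *mask = argc > 2 ? argv[2] : "2";	/* CPU 1 */
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%s/smp_affinity", irq);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}

	/* The buffered write is flushed to procfs on fclose(). */
	if (fprintf(f, "%s\n", mask) < 0 || fclose(f) == EOF) {
		/*
		 * Before this change, a transient EBUSY from the vector
		 * management could surface here; afterwards the kernel
		 * queues the move as pending instead.
		 */
		fprintf(stderr, "write %s: %s\n", path, strerror(errno));
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
```

In practice this means tools that retune affinity frequently (irqbalance, manual pinning scripts) see fewer spurious EBUSY failures, since the kernel defers the move rather than propagating the error, as described in the comment added in irq_try_set_affinity().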