// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * interrupts, but in an edge trigger case we might be setting
	 * the RTE while an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
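/*
 * For illustration only, not part of this file: callers do not invoke
 * __irq_move_irq() directly. The generic flow handlers reach it through an
 * irq_move_irq() wrapper, provided by <linux/irq.h> under
 * CONFIG_GENERIC_PENDING_IRQ, which looks roughly like the sketch below.
 * It checks the pending bit first, so the mask/unmask dance above only
 * runs when an affinity change has actually been queued. The sketch is
 * guarded with #if 0 since the real definition already comes from the
 * header included above.
 */
#if 0	/* sketch of the wrapper in <linux/irq.h> */
static inline void irq_move_irq(struct irq_data *data)
{
	/* Fast path: nothing queued, avoid touching the chip at all. */
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}
#endif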