/*
 * linux/kernel/irq/autoprobe.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the interrupt probing code and driver APIs.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/async.h>

#include "internals.h"

/*
 * Autodetection depends on the fact that any interrupt that
 * comes in on an unassigned line will get stuck with
 * "IRQS_WAITING" cleared and the interrupt disabled.
 */
static DEFINE_MUTEX(probing_active);

/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
        struct irq_desc *desc;
        unsigned long mask = 0;
        int i;

        /*
         * quiesce the kernel, or at least the asynchronous portion
         */
        async_synchronize_full();
        mutex_lock(&probing_active);
        /*
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious.
         */
        for_each_irq_desc_reverse(i, desc) {
                raw_spin_lock_irq(&desc->lock);
                if (!desc->action && irq_settings_can_probe(desc)) {
                        /*
                         * Some chips need to know about probing in
                         * progress:
                         */
                        if (desc->irq_data.chip->irq_set_type)
                                desc->irq_data.chip->irq_set_type(&desc->irq_data,
                                                                  IRQ_TYPE_PROBE);
                        irq_startup(desc, false);
                }
                raw_spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        msleep(20);

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for_each_irq_desc_reverse(i, desc) {
                raw_spin_lock_irq(&desc->lock);
                if (!desc->action && irq_settings_can_probe(desc)) {
                        desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
                        if (irq_startup(desc, false))
                                desc->istate |= IRQS_PENDING;
                }
                raw_spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        msleep(100);

        /*
         * Now filter out any obviously spurious interrupts
         */
        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);

                if (desc->istate & IRQS_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(desc->istate & IRQS_WAITING)) {
                                desc->istate &= ~IRQS_AUTODETECT;
                                irq_shutdown(desc);
                        } else if (i < 32) {
                                /* Only the first 32 lines fit in the mask. */
                                mask |= 1UL << i;
                        }
                }
                raw_spin_unlock_irq(&desc->lock);
        }

        return mask;
}
EXPORT_SYMBOL(probe_irq_on);
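
/*
 * Illustrative sketch (not from this file): the classic probe sequence
 * a legacy driver would build on the API above. mydev_trigger_irq()
 * and mydev_clear_irq() are hypothetical device-specific helpers.
 */
#if 0	/* example only, not compiled */
static int mydev_find_irq(void)
{
        unsigned long cookie;
        int irq;

        cookie = probe_irq_on();        /* enable unassigned lines */
        mydev_trigger_irq();            /* make the device raise its IRQ */
        udelay(50);                     /* give the interrupt time to arrive */
        irq = probe_irq_off(cookie);    /* 0: none, <0: several candidates */
        mydev_clear_irq();

        return irq > 0 ? irq : -ENODEV;
}
#endif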

/**
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @val: mask of interrupts to consider
 *
 * Scan the interrupt lines and return a bitmap of active
 * autodetect interrupts. The interrupt probe logic state
 * is then returned to its previous value.
 *
 * Note: we need to scan all the irqs even though we will
 * only return autodetect irq numbers - just so that we reset
 * them all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
        unsigned int mask = 0;
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);
                if (desc->istate & IRQS_AUTODETECT) {
                        /* Only the first 16 lines are reported in the mask. */
                        if (i < 16 && !(desc->istate & IRQS_WAITING))
                                mask |= 1 << i;

                        desc->istate &= ~IRQS_AUTODETECT;
                        irq_shutdown(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
        mutex_unlock(&probing_active);

        return mask & val;
}
EXPORT_SYMBOL(probe_irq_mask);
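
/*
 * Illustrative sketch (not from this file): probe_irq_mask() suits a
 * driver that wants the full set of candidate lines that fired rather
 * than a single best guess. mydev_trigger_irq() is a hypothetical
 * device-specific helper.
 */
#if 0	/* example only, not compiled */
static unsigned int mydev_probe_candidates(void)
{
        unsigned long cookie = probe_irq_on();

        mydev_trigger_irq();
        udelay(50);

        /* Only consider IRQs 3, 5 and 7 as plausible for this device. */
        return probe_irq_mask(cookie & ((1UL << 3) | (1UL << 5) | (1UL << 7)));
}
#endif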

/**
 * probe_irq_off - end an interrupt autodetect
 * @val: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then the negative of the first candidate is returned to
 * indicate that there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
        int i, irq_found = 0, nr_of_irqs = 0;
        struct irq_desc *desc;

        for_each_irq_desc(i, desc) {
                raw_spin_lock_irq(&desc->lock);

                if (desc->istate & IRQS_AUTODETECT) {
                        if (!(desc->istate & IRQS_WAITING)) {
                                if (!nr_of_irqs)
                                        irq_found = i;
                                nr_of_irqs++;
                        }
                        desc->istate &= ~IRQS_AUTODETECT;
                        irq_shutdown(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
        mutex_unlock(&probing_active);

        /* Multiple candidates: negate to signal ambiguity. */
        if (nr_of_irqs > 1)
                irq_found = -irq_found;

        return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
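
/*
 * Illustrative sketch (not from this file): decoding the return value
 * of probe_irq_off(). A caller content with the first candidate can
 * simply negate an ambiguous result.
 */
#if 0	/* example only, not compiled */
static int mydev_pick_irq(unsigned long cookie)
{
        int irq = probe_irq_off(cookie);

        if (irq < 0) {
                /* Several lines fired; accept the first candidate. */
                pr_warn("mydev: ambiguous probe, using IRQ %d\n", -irq);
                irq = -irq;
        }
        return irq;     /* 0 means nothing was detected */
}
#endif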