// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <linux/wakeup_reason.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if that happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
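
/*
 * Usage sketch (not part of this file): a hypothetical irqchip driver
 * binding its chip in an irq_domain ->map() callback. "foo_chip" and
 * "foo_irq_map" are illustrative names, not kernel APIs.
 */
#if 0
static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* Attach the chip first, then install a flow handler. */
	irq_set_chip(virq, &foo_chip);
	irq_set_chip_data(virq, d->host_data);
	irq_set_handler(virq, handle_level_irq);
	return 0;
}
#endif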

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
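
/*
 * Usage sketch (not part of this file): a driver selecting a trigger type
 * before requesting the line. "foo_handler" and "foo_dev" are illustrative.
 */
#if 0
static int foo_setup_line(unsigned int irq, void *foo_dev)
{
	int ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);

	if (ret)
		return ret;
	return request_irq(irq, foo_handler, 0, "foo", foo_dev);
}
#endif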

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
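
/*
 * Usage sketch (not part of this file): chip callbacks typically recover
 * the per-chip state stored above via irq_data_get_irq_chip_data().
 * "struct foo_gc" and the FOO_* register are illustrative only.
 */
#if 0
static void foo_mask(struct irq_data *d)
{
	struct foo_gc *gc = irq_data_get_irq_chip_data(d);

	/* Mask the line corresponding to this hwirq in the device. */
	writel(BIT(irqd_to_hwirq(d)), gc->base + FOO_MASK_SET);
}
#endif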

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
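
/*
 * Usage sketch (not part of this file): a driver opting out of the lazy
 * disable optimization described above, e.g. because the device cannot
 * gate the interrupt itself. The irq number is illustrative.
 */
#if 0
	/* In the driver's setup path, before the line is ever disabled: */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	/* disable_irq() now masks the line at the hardware level. */
	disable_irq(irq);
#endif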

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
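
/*
 * Usage sketch (not part of this file): a threaded handler of an i2c irq
 * expander demultiplexing into nested child interrupts. The "foo" names
 * are illustrative; the child irqs are assumed to have been set up with
 * irq_set_nested_thread().
 */
#if 0
static irqreturn_t foo_expander_thread_fn(int irq, void *dev_id)
{
	struct foo_expander *foo = dev_id;
	unsigned long pending = foo_read_pending(foo);	/* sleeps on i2c */
	int bit;

	for_each_set_bit(bit, &pending, foo->nr_irqs)
		handle_nested_irq(irq_find_mapping(foo->domain, bit));

	return IRQ_HANDLED;
}
#endif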

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask)) {
#ifdef CONFIG_PM_SLEEP
		if (unlikely(desc->no_suspend_depth &&
			     irqd_is_wakeup_set(&desc->irq_data))) {
			unsigned int irq = irq_desc_get_irq(desc);
			const char *name = "(unnamed)";

			if (desc->action && desc->action->name)
				name = desc->action->name;

			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
						   irq, name);
		}
#endif
		return true;
	}

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
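
/*
 * Usage sketch (not part of this file): an irqchip driver selecting this
 * flow handler for its level triggered lines, typically from its
 * irq_domain ->map() callback. "foo_chip" is an illustrative name.
 */
#if 0
	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
#endif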
673
674#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
675static inline void preflow_handler(struct irq_desc *desc)
676{
677 if (desc->preflow_handler)
678 desc->preflow_handler(&desc->irq_data);
679}
680#else
681static inline void preflow_handler(struct irq_desc *desc) { }
682#endif
683
684static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
685{
686 if (!(desc->istate & IRQS_ONESHOT)) {
687 chip->irq_eoi(&desc->irq_data);
688 return;
689 }
690 /*
691 * We need to unmask in the following cases:
692 * - Oneshot irq which did not wake the thread (caused by a
693 * spurious interrupt or a primary handler handling it
694 * completely).
695 */
696 if (!irqd_irq_disabled(&desc->irq_data) &&
697 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
698 chip->irq_eoi(&desc->irq_data);
699 unmask_irq(desc);
700 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
701 chip->irq_eoi(&desc->irq_data);
702 }
703}
704
705/**
706 * handle_fasteoi_irq - irq handler for transparent controllers
707 * @desc: the interrupt description structure for this irq
708 *
709 * Only a single callback will be issued to the chip: an ->eoi()
710 * call when the interrupt has been serviced. This enables support
711 * for modern forms of interrupt handlers, which handle the flow
712 * details in hardware, transparently.
713 */
714void handle_fasteoi_irq(struct irq_desc *desc)
715{
716 struct irq_chip *chip = desc->irq_data.chip;
717
718 raw_spin_lock(&desc->lock);
719
720 if (!irq_may_run(desc))
721 goto out;
722
723 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
724
725 /*
726 * If its disabled or no action available
727 * then mask it and get out of here:
728 */
729 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
730 desc->istate |= IRQS_PENDING;
731 mask_irq(desc);
732 goto out;
733 }
734
735 kstat_incr_irqs_this_cpu(desc);
736 if (desc->istate & IRQS_ONESHOT)
737 mask_irq(desc);
738
739 preflow_handler(desc);
740 handle_irq_event(desc);
741
742 cond_unmask_eoi_irq(desc, chip);
743
744 raw_spin_unlock(&desc->lock);
745 return;
746out:
747 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
748 chip->irq_eoi(&desc->irq_data);
749 raw_spin_unlock(&desc->lock);
750}
751EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
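
/*
 * Usage sketch (not part of this file): per cpu interrupts are requested
 * once with a percpu dev_id cookie, then enabled on each CPU; the handler
 * above passes the CPU-local copy to the action via raw_cpu_ptr(). The
 * "foo_*" names are illustrative.
 */
#if 0
static DEFINE_PER_CPU(struct foo_timer, foo_timers);

static int foo_timer_setup(unsigned int irq)
{
	int err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
				     &foo_timers);
	if (err)
		return err;
	/* Runs on each CPU, e.g. from a CPU hotplug callback. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif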

/**
 * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * The biggest difference with the IRQ version is that the interrupt is
 * EOIed early, as the IPI could result in a context switch, and we need to
 * make sure the IPI can fire again. We also assume that the arch code has
 * registered an action. If not, we are positively doomed.
 */
void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
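
/*
 * Usage sketch (not part of this file): a GPIO controller demultiplexing a
 * single parent line. The chained flow handler runs in the parent's hard
 * irq context and must not be requested via request_irq(). The "foo" names
 * and FOO_PENDING register are illustrative.
 */
#if 0
static void foo_gpio_irq_handler(struct irq_desc *desc)
{
	struct foo_gpio *gpio = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(chip, desc);
	pending = readl(gpio->base + FOO_PENDING);
	for_each_set_bit(bit, &pending, gpio->ngpio)
		generic_handle_irq(irq_find_mapping(gpio->domain, bit));
	chained_irq_exit(chip, desc);
}

	/* In probe(): */
	irq_set_chained_handler_and_data(parent_irq, foo_gpio_irq_handler,
					 gpio);
#endif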

void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
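
/*
 * Usage sketch (not part of this file): common helpers built on
 * irq_modify_status(), as irqchip drivers typically use them. The virq
 * is illustrative.
 */
#if 0
	/* Keep the line from being auto-enabled at request time. */
	irq_set_status_flags(virq, IRQ_NOAUTOEN);
	/* Mark a per cpu line and exclude it from balancing and probing. */
	irq_modify_status(virq, IRQ_NOREQUEST,
			  IRQ_PER_CPU | IRQ_NOPROBE | IRQ_NO_BALANCING);
#endif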

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	State to be restored (one of IRQCHIP_STATE_*)
 * @val:	Value corresponding to @which
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool val)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_set_irqchip_state)
		return 0;

	return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	one of IRQCHIP_STATE_* the caller wants to know
 * @state:	a pointer to a boolean where the state is to be stored
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
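
/*
 * Usage sketch (not part of this file): a child chip in a hierarchical
 * domain that handles nothing itself and forwards everything to its
 * parent via the helpers above. "foo" is an illustrative name.
 */
#if 0
static struct irq_chip foo_msi_chip = {
	.name			= "foo-msi",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_wake		= irq_chip_set_wake_parent,
};
#endif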

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

static struct device *irq_get_parent_device(struct irq_data *data)
{
	if (data->chip->parent_device)
		return data->chip->parent_device;

	if (data->domain)
		return data->domain->dev;

	return NULL;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_parent_device(data);
	int retval;

	if (IS_ENABLED(CONFIG_PM) && dev) {
		retval = pm_runtime_get_sync(dev);
		if (retval < 0) {
			pm_runtime_put_noidle(dev);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_parent_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}