/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

/*
 * Provide a no-op fallback when no architecture-specific irq_finish
 * function is defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
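
/*
 * A minimal sketch of how a machine class could override the hook
 * above from its irqs.h; the EOI register write, ic_base and IC_EOI
 * are illustrative assumptions, not copied from any real platform:
 *
 *	#define irq_finish(irq)	writel_relaxed(0, ic_base + IC_EOI)
 */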

unsigned long irq_err_count;

/*
 * Called from the generic /proc/interrupts code to append the
 * architecture-specific lines: FIQs, IPIs and the error count.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
	show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
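
/*
 * With the format string above, and assuming prec == 3 and no spurious
 * interrupts counted yet, the line appended to /proc/interrupts reads:
 *
 *	Err:          0
 */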

/*
 * handle_IRQ handles all hardware IRQs. Decoded IRQs should
 * not come via this function. Instead, they should provide their
 * own 'handler'. Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	zxic_trace_irq_enter(irq);

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	/* AT91-specific workaround */
	irq_finish(irq);

	zxic_trace_irq_exit(irq);
	irq_exit();
	set_irq_regs(old_regs);
}
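
/*
 * A minimal sketch of the C-based first-level decoding mentioned
 * above, as a machine_desc->handle_irq callback; the pending-register
 * read, plat_ic_base and PLAT_IRQ_BASE are illustrative assumptions,
 * not any real platform's code:
 *
 *	asmlinkage void __exception_irq_entry
 *	plat_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 pending = readl_relaxed(plat_ic_base + IC_PENDING);
 *
 *		while (pending) {
 *			unsigned int hwirq = __ffs(pending);
 *
 *			handle_IRQ(PLAT_IRQ_BASE + hwirq, regs);
 *			pending &= ~BIT(hwirq);
 *		}
 *	}
 */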

/*
 * asm_do_IRQ is the interface to be used from assembly code.
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	handle_IRQ(irq, regs);
}

/*
 * Translate the legacy ARM IRQF_VALID/IRQF_PROBE/IRQF_NOAUTOEN flags
 * into the generic IRQ_NOREQUEST/IRQ_NOPROBE/IRQ_NOAUTOEN status bits.
 */
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%u\n", irq);
		return;
	}

	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;
	/* Order is clear bits in "clr" then set bits in "set" */
	irq_modify_status(irq, clr, set & ~clr);
}
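
/*
 * Typical use when a platform registers an interrupt; my_chip is a
 * placeholder, while irq_set_chip_and_handler() and handle_level_irq()
 * are real genirq APIs:
 *
 *	irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
 *	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 */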

void __init init_IRQ(void)
{
	machine_desc->init_irq();
}

#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
	return nr_irqs;
}
#endif
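
/*
 * Both hooks above consume fields of the board's machine record; a
 * sketch with an invented board, where .nr_irqs and .init_irq are the
 * struct machine_desc fields read by the code above:
 *
 *	MACHINE_START(MYBOARD, "My Board")
 *		.nr_irqs	= NR_MYBOARD_IRQS,
 *		.init_irq	= myboard_init_irq,
 *	MACHINE_END
 */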

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Take one interrupt away from the CPU that is going down.  Returns
 * true if the IRQ's affinity had to be broken, i.e. none of the CPUs
 * it was allowed on remain online.
 */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}
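
/*
 * migrate_one_irq() can only move interrupts whose irq_chip implements
 * .irq_set_affinity; a minimal sketch of such a callback, where the
 * IC_ROUTE register layout and my_ic_base are illustrative assumptions:
 *
 *	static int my_irq_set_affinity(struct irq_data *d,
 *				       const struct cpumask *mask, bool force)
 *	{
 *		unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);
 *
 *		if (cpu >= nr_cpu_ids)
 *			return -EINVAL;
 *
 *		writel_relaxed(BIT(cpu), my_ic_base + IC_ROUTE(d->hwirq));
 *		return IRQ_SET_MASK_OK;
 *	}
 */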

/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken && printk_ratelimit())
			pr_warn("IRQ%u no longer affine to CPU%u\n", i,
				smp_processor_id());
	}

	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */