// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file defines the irq handlers for MSP PER subsystem interrupts.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/mipsregs.h>

#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * Convenience macro; should live somewhere generic.  Reads the CP0
 * TCBind register to find the VPE the current TC is bound to.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP
/*
 * The PER registers must be protected from concurrent access.
 */
static DEFINE_SPINLOCK(per_lock);
#endif

/* Ensure posted writes to the PER block have completed. */
static inline void per_wmb(void)
{
	const volatile void __iomem *per_mem = PER_INT_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	/* read back to flush the posted write out to the device */
	dummy_read = __raw_readl(per_mem);
	/* the variable is volatile; the increment only quiets
	 * "set but not used" warnings */
	dummy_read++;
}

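/*
 * The mask updates below are non-atomic read-modify-writes of
 * PER_INT_MSK_REG, so they are serialized with per_lock on SMP.
 * Both paths finish with per_wmb() to push the change to the device.
 */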
static inline void unmask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	spin_lock_irqsave(&per_lock, flags);
	*PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
	spin_unlock_irqrestore(&per_lock, flags);
#else
	*PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
#endif
	per_wmb();
}

static inline void mask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	spin_lock_irqsave(&per_lock, flags);
	*PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
	spin_unlock_irqrestore(&per_lock, flags);
#else
	*PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
#endif
	per_wmb();
}

static inline void msp_per_irq_ack(struct irq_data *d)
{
	mask_per_irq(d);
	/*
	 * In the PER interrupt controller, only bits 11 and 10
	 * (SPI TX complete and SPI RX complete) are write-to-clear;
	 * the write below has no effect on any other bit.
	 */
	*PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
}

#ifdef CONFIG_SMP
static int msp_per_irq_set_affinity(struct irq_data *d,
	const struct cpumask *affinity, bool force)
{
	/*
	 * The PER block has no per-CPU steering, so there is nothing to
	 * program here; all this can do is make sure the line is unmasked.
	 */
	unmask_per_irq(d);
	return 0;
}
#endif

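/*
 * Illustrative only: a driver claims one of the 32 PER sources by its
 * offset from MSP_PER_INTBASE, e.g. for the bit-10 SPI RX complete
 * source described in msp_per_irq_ack() above (handler, name and dev
 * cookie below are hypothetical):
 *
 *	err = request_irq(MSP_PER_INTBASE + 10, handler, 0, "msp-spi-rx", dev);
 */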
static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
	.irq_enable = unmask_per_irq,
	.irq_disable = mask_per_irq,
	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
};

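/*
 * Boot-time setup: start with every source masked, clear any pending
 * write-to-clear status bits, and attach the PER irq_chip to all 32
 * descriptors.
 */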
void __init msp_per_irq_init(void)
{
	int i;

	/* Mask/clear interrupts. */
	*PER_INT_MSK_REG = 0x00000000;
	*PER_INT_STS_REG = 0xFFFFFFFF;
	/* initialize all the IRQ descriptors */
	for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++)
		irq_set_chip(i, &msp_per_irq_controller);
}

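/*
 * Service the lowest-numbered source that is both enabled and pending.
 * ffs() returns a 1-based bit index, hence the -1 when mapping the bit
 * back to a Linux irq number.
 */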
void msp_per_irq_dispatch(void)
{
	u32 per_mask = *PER_INT_MSK_REG;
	u32 per_status = *PER_INT_STS_REG;
	u32 pending;

	pending = per_status & per_mask;
	if (pending)
		do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
	else
		spurious_interrupt();
}