// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which source 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES		1024
#define MAX_CONTEXTS		15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE		0
#define PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE		0x2000
#define ENABLE_PER_HART		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE		0x200000
#define CONTEXT_PER_HART	0x1000
#define CONTEXT_THRESHOLD	0x00
#define CONTEXT_CLAIM		0x04

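/* Base of the single memory-mapped PLIC register block, filled in by plic_init(). */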
static void __iomem *plic_regs;

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

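/*
 * Set or clear the enable bit for @hwirq in @handler's enable array.  Each
 * 32-bit enable word covers 32 sources, so e.g. hwirq 33 lives in bit 1 of
 * the word at enable_base + 0x4.  The read-modify-write runs under
 * enable_lock because the hardware offers no atomic bit operations here.
 */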
static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

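/*
 * Program the priority register for @hwirq and flip its enable bit in every
 * present handler covered by @mask.  Priority 1 (enable) exceeds the per-hart
 * threshold of 0 set at init time; priority 0 (disable) never exceeds any
 * threshold.
 */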
static inline void plic_irq_toggle(const struct cpumask *mask,
				   int hwirq, int enable)
{
	int cpu;

	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present)
			plic_toggle(handler, hwirq, enable);
	}
}

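/*
 * Unmask enables the source on one online CPU from the IRQ's affinity mask;
 * mask disables it on all possible CPUs so no stale enable bit survives.
 */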
static void plic_irq_unmask(struct irq_data *d)
{
	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
					   cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}

#ifdef CONFIG_SMP
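/*
 * Changing affinity disables the source on all possible CPUs and re-enables
 * it on a single CPU picked from the new mask, mirroring plic_irq_mask() and
 * plic_irq_unmask().  The effective affinity is reduced to that one CPU
 * because each source is routed to a single hart context here.
 */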
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

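/*
 * Completion: write the claimed source ID back to the claim/complete
 * register.  The PLIC ignores a completion for a source that is not enabled
 * for this context, so if the IRQ was masked while being handled it is
 * briefly unmasked around the write and masked again afterwards.
 */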
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (irqd_irq_masked(d)) {
		plic_irq_unmask(d);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_irq_mask(d);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

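/* irq_chip callbacks wired into the generic fasteoi flow below. */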
static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

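/*
 * Every hwirq uses the fasteoi flow handler: the source stays enabled while
 * it is serviced and is completed afterwards in plic_irq_eoi().
 */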
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
	irq_set_chip_data(irq, NULL);
	irq_set_noprobe(irq);
	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.map		= plic_irqdomain_map,
	.xlate		= irq_domain_xlate_onecell,
};

static struct irq_domain *plic_irqdomain;

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(sie, SIE_SEIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(plic_irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(sie, SIE_SEIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}

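/*
 * Probe path: map the register block, size the irq domain from the
 * "riscv,ndev" property, then walk the per-context interrupt specifiers to
 * find each hart's S-mode external-interrupt context and attach it to a
 * per-cpu handler.
 */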
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;

	if (plic_regs) {
		pr_warn("PLIC already present.\n");
		return -ENXIO;
	}

	plic_regs = of_iomap(node, 0);
	if (WARN_ON(!plic_regs))
		return -EIO;

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;
	if (WARN_ON(nr_contexts < num_possible_cpus()))
		goto out_iounmap;

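	/*
	 * hwirq 0 is the reserved non-existent source 0, so the linear domain
	 * needs nr_irqs + 1 slots to cover hwirqs 1..nr_irqs.
	 */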
	error = -ENOMEM;
	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, NULL);
	if (WARN_ON(!plic_irqdomain))
		goto out_iounmap;

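	/*
	 * One iteration per PLIC context: keep only supervisor external
	 * interrupt contexts, resolve their harts to Linux CPUs, program the
	 * priority threshold, and start with every source masked.
	 */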
	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu, hartid;
		u32 threshold = 0;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/* skip contexts other than supervisor external interrupt */
		if (parent.args[0] != IRQ_S_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			threshold = 0xffffffff;
			goto done;
		}

		handler->present = true;
		handler->hart_base =
			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;

done:
		/* priority must be > threshold to trigger an interrupt */
		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(plic_regs);
	return error;
}


IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */