/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* An interrupt mapped to pin X causes the GIC to generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

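/*
 * Report whether a GIC local interrupt can be routed to this VPE. In EIC
 * mode everything is routable; otherwise consult the routability bits in
 * the local control register.
 */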
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

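/* Trigger the IPI interrupt behind @d on @cpu via the GIC wedge register. */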
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

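/*
 * Return the IRQ number to use for the CP0 count/compare timer. Fall back
 * to the CPU interrupt when the GIC cannot route the local timer interrupt.
 */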
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

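/*
 * Handle pending shared interrupts: read the pending bitmap, mask it with
 * this CPU's pcpu_mask and dispatch each set bit through the IRQ domain.
 */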
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

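/* Mask a shared interrupt and drop it from every CPU's pcpu_mask. */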
static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

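/*
 * Unmask a shared interrupt and set its bit in the pcpu_mask of the CPU
 * named by the effective affinity, so that CPU will dispatch it.
 */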
static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}

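/*
 * Configure polarity, trigger and dual-edge bits for a shared interrupt
 * and switch the irq_chip/flow handler between level and edge variants.
 */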
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
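/*
 * Route a shared interrupt to the first online CPU in @cpumask and update
 * the pcpu_masks so only that CPU will handle it.
 */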
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

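/*
 * Handle pending local interrupts for this VPE: mask the pending bits with
 * the enabled mask and dispatch each one through the IRQ domain.
 */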
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};

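/*
 * Mask/unmask a local interrupt on every VPE by iterating over them
 * through the GIC "other" (VPE-other) register block.
 */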
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

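/*
 * Map a local interrupt: pick the CPU pin (the timer may use a different
 * pin, see the CONFIG_MIPS_CMP workaround in gic_of_init) and program the
 * map register on every VPE.
 */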
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int i;
	unsigned long flags;
	u32 val;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see gic_of_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

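/* Map a shared interrupt to the GIC CPU pin and route it to @cpu. */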
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

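/*
 * Map a hwirq into the GIC IRQ domain: shared interrupts get the level
 * chip by default and are initially routed to CPU 0, while local interrupts
 * get one of the per-CPU chips depending on whether the kernel drives them
 * through the percpu IRQ API.
 */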
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

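/*
 * Allocate a block of IPI interrupts from the reserved range: claim
 * nr_irqs consecutive hwirqs starting at the first available bit and map
 * one to each CPU in the supplied cpumask.
 */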
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
		break;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

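/*
 * Probe and initialise the GIC from device-tree: discover the base address
 * (from DT or the CM GCR), pick a CPU vector, create the shared/local IRQ
 * domain and the IPI domain, reserve IPI vectors, and mask all shared and
 * routable local interrupts with sane defaults.
 */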
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
	gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			write_gic_vl_other(mips_cm_vp_id(cpu));
			write_gic_vo_ctl(GIC_VX_CTL_EIC);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			write_gic_vo_rmask(BIT(j));
		}
	}

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);