// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics 2017
 * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/syscore_ops.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define IRQS_PER_BANK 32

#define HWSPNLCK_TIMEOUT	1000 /* usec */
#define HWSPNLCK_RETRY_DELAY	100  /* usec */

struct stm32_exti_bank {
	u32 imr_ofst;
	u32 emr_ofst;
	u32 rtsr_ofst;
	u32 ftsr_ofst;
	u32 swier_ofst;
	u32 rpr_ofst;
	u32 fpr_ofst;
};

#define UNDEF_REG ~0

struct stm32_desc_irq {
	u32 exti;
	u32 irq_parent;
};

struct stm32_exti_drv_data {
	const struct stm32_exti_bank **exti_banks;
	const struct stm32_desc_irq *desc_irqs;
	u32 bank_nr;
	u32 irq_nr;
};

struct stm32_exti_chip_data {
	struct stm32_exti_host_data *host_data;
	const struct stm32_exti_bank *reg_bank;
	struct raw_spinlock rlock;
	u32 wake_active;
	u32 mask_cache;
	u32 rtsr_cache;
	u32 ftsr_cache;
};

struct stm32_exti_host_data {
	void __iomem *base;
	struct stm32_exti_chip_data *chips_data;
	const struct stm32_exti_drv_data *drv_data;
	struct hwspinlock *hwlock;
};

static struct stm32_exti_host_data *stm32_host_data;

static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
	.imr_ofst = 0x00,
	.emr_ofst = 0x04,
	.rtsr_ofst = 0x08,
	.ftsr_ofst = 0x0C,
	.swier_ofst = 0x10,
	.rpr_ofst = 0x14,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
	&stm32f4xx_exti_b1,
};

static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
	.exti_banks = stm32f4xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
};

static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
	.imr_ofst = 0x80,
	.emr_ofst = 0x84,
	.rtsr_ofst = 0x00,
	.ftsr_ofst = 0x04,
	.swier_ofst = 0x08,
	.rpr_ofst = 0x88,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
	.imr_ofst = 0x90,
	.emr_ofst = 0x94,
	.rtsr_ofst = 0x20,
	.ftsr_ofst = 0x24,
	.swier_ofst = 0x28,
	.rpr_ofst = 0x98,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
	.imr_ofst = 0xA0,
	.emr_ofst = 0xA4,
	.rtsr_ofst = 0x40,
	.ftsr_ofst = 0x44,
	.swier_ofst = 0x48,
	.rpr_ofst = 0xA8,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
	&stm32h7xx_exti_b1,
	&stm32h7xx_exti_b2,
	&stm32h7xx_exti_b3,
};

static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
	.exti_banks = stm32h7xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
};

static const struct stm32_exti_bank stm32mp1_exti_b1 = {
	.imr_ofst = 0x80,
	.emr_ofst = 0x84,
	.rtsr_ofst = 0x00,
	.ftsr_ofst = 0x04,
	.swier_ofst = 0x08,
	.rpr_ofst = 0x0C,
	.fpr_ofst = 0x10,
};

static const struct stm32_exti_bank stm32mp1_exti_b2 = {
	.imr_ofst = 0x90,
	.emr_ofst = 0x94,
	.rtsr_ofst = 0x20,
	.ftsr_ofst = 0x24,
	.swier_ofst = 0x28,
	.rpr_ofst = 0x2C,
	.fpr_ofst = 0x30,
};

static const struct stm32_exti_bank stm32mp1_exti_b3 = {
	.imr_ofst = 0xA0,
	.emr_ofst = 0xA4,
	.rtsr_ofst = 0x40,
	.ftsr_ofst = 0x44,
	.swier_ofst = 0x48,
	.rpr_ofst = 0x4C,
	.fpr_ofst = 0x50,
};

static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
	&stm32mp1_exti_b1,
	&stm32mp1_exti_b2,
	&stm32mp1_exti_b3,
};

static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
	{ .exti = 0, .irq_parent = 6 },
	{ .exti = 1, .irq_parent = 7 },
	{ .exti = 2, .irq_parent = 8 },
	{ .exti = 3, .irq_parent = 9 },
	{ .exti = 4, .irq_parent = 10 },
	{ .exti = 5, .irq_parent = 23 },
	{ .exti = 6, .irq_parent = 64 },
	{ .exti = 7, .irq_parent = 65 },
	{ .exti = 8, .irq_parent = 66 },
	{ .exti = 9, .irq_parent = 67 },
	{ .exti = 10, .irq_parent = 40 },
	{ .exti = 11, .irq_parent = 42 },
	{ .exti = 12, .irq_parent = 76 },
	{ .exti = 13, .irq_parent = 77 },
	{ .exti = 14, .irq_parent = 121 },
	{ .exti = 15, .irq_parent = 127 },
	{ .exti = 16, .irq_parent = 1 },
	{ .exti = 65, .irq_parent = 144 },
	{ .exti = 68, .irq_parent = 143 },
	{ .exti = 73, .irq_parent = 129 },
};

static const struct stm32_exti_drv_data stm32mp1_drv_data = {
	.exti_banks = stm32mp1_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
	.desc_irqs = stm32mp1_desc_irq,
	.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};

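/*
 * Look up the parent (GIC) interrupt number wired to a given EXTI line.
 * Returns -EINVAL when the line has no direct parent interrupt.
 */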
static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
			     irq_hw_number_t hwirq)
{
	const struct stm32_desc_irq *desc_irq;
	int i;

	if (!drv_data->desc_irqs)
		return -EINVAL;

	for (i = 0; i < drv_data->irq_nr; i++) {
		desc_irq = &drv_data->desc_irqs[i];
		if (desc_irq->exti == hwirq)
			return desc_irq->irq_parent;
	}

	return -EINVAL;
}

static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	unsigned long pending;

	pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);

	return pending;
}

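/*
 * Chained handler for the non-hierarchical (F4/H7) flavour: walk every
 * bank, read its pending register(s) and dispatch each set bit to the
 * virtual interrupt mapped in the linear domain.
 */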
static void stm32_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq, nbanks = domain->gc->num_chips;
	struct irq_chip_generic *gc;
	unsigned long pending;
	int n, i, irq_base = 0;

	chained_irq_enter(chip, desc);

	for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
		gc = irq_get_domain_generic_chip(domain, irq_base);

		while ((pending = stm32_exti_pending(gc))) {
			for_each_set_bit(n, &pending, IRQS_PER_BANK) {
				virq = irq_find_mapping(domain, irq_base + n);
				generic_handle_irq(virq);
			}
		}
	}

	chained_irq_exit(chip, desc);
}

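/*
 * Translate an IRQ trigger type into the rising/falling trigger
 * selection register values. Only edge triggers are supported.
 */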
static int stm32_exti_set_type(struct irq_data *d,
			       unsigned int type, u32 *rtsr, u32 *ftsr)
{
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		*rtsr |= mask;
		*ftsr &= ~mask;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		*rtsr &= ~mask;
		*ftsr |= mask;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		*rtsr |= mask;
		*ftsr |= mask;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
{
	int ret, timeout = 0;

	if (!chip_data->host_data->hwlock)
		return 0;

	/*
	 * Use the x_raw API since we are under spin_lock protection.
	 * Do not use the x_timeout API because we are under irq_disable
	 * mode (see __setup_irq())
	 */
	do {
		ret = hwspin_trylock_raw(chip_data->host_data->hwlock);
		if (!ret)
			return 0;

		udelay(HWSPNLCK_RETRY_DELAY);
		timeout += HWSPNLCK_RETRY_DELAY;
	} while (timeout < HWSPNLCK_TIMEOUT);

	if (ret == -EBUSY)
		ret = -ETIMEDOUT;

	if (ret)
		pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);

	return ret;
}

static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
{
	if (chip_data->host_data->hwlock)
		hwspin_unlock_raw(chip_data->host_data->hwlock);
}

static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	u32 rtsr, ftsr;
	int err;

	irq_gc_lock(gc);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
	ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
	irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	irq_gc_unlock(gc);

	return err;
}

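/*
 * On suspend, save the trigger registers and mask every line except the
 * configured wakeup sources; on resume, restore the triggers and the
 * cached interrupt mask.
 */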
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
			       u32 wake_active)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* save rtsr, ftsr registers */
	chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
	chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);

	writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
}

static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
			      u32 mask_cache)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* restore rtsr, ftsr registers */
	writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
	writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);

	writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
}

static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_suspend(chip_data, gc->wake_active);
	irq_gc_unlock(gc);
}

static void stm32_irq_resume(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_resume(chip_data, gc->mask_cache);
	irq_gc_unlock(gc);
}

static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;

	hwirq = fwspec->param[0];

	irq_map_generic_chip(d, virq, hwirq);

	return 0;
}

static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);

	irq_domain_reset_irq_data(data);
}

static const struct irq_domain_ops irq_exti_domain_ops = {
	.map = irq_map_generic_chip,
	.alloc = stm32_exti_alloc,
	.free = stm32_exti_free,
	.xlate = irq_domain_xlate_twocell,
};

static void stm32_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	irq_gc_lock(gc);

	irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);

	irq_gc_unlock(gc);
}

/* directly set the target bit without reading first. */
static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val = BIT(d->hwirq % IRQS_PER_BANK);

	writel_relaxed(val, base + reg);
}

static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val |= BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val &= ~BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

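/*
 * Hierarchical (MP1) irqchip callbacks: acknowledge both pending
 * registers, keep a cache of the interrupt mask, and forward the
 * operation to the parent GIC when the EXTI line has one.
 */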
static void stm32_exti_h_eoi(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);

	stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		stm32_exti_write_bit(d, stm32_bank->fpr_ofst);

	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_eoi_parent(d);
}

static void stm32_exti_h_mask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_mask_parent(d);
}

static void stm32_exti_h_unmask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_unmask_parent(d);
}

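/*
 * Configure the trigger edges for a hierarchical EXTI line, serialized
 * by the bank spinlock and the optional hardware spinlock.
 */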
static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;
	u32 rtsr, ftsr;
	int err;

	raw_spin_lock(&chip_data->rlock);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
	ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
	writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	raw_spin_unlock(&chip_data->rlock);

	return err;
}

static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	raw_spin_lock(&chip_data->rlock);

	if (on)
		chip_data->wake_active |= mask;
	else
		chip_data->wake_active &= ~mask;

	raw_spin_unlock(&chip_data->rlock);

	return 0;
}

static int stm32_exti_h_set_affinity(struct irq_data *d,
				     const struct cpumask *dest, bool force)
{
	if (d->parent_data->chip)
		return irq_chip_set_affinity_parent(d, dest, force);

	return -EINVAL;
}

static int __maybe_unused stm32_exti_h_suspend(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_suspend(chip_data, chip_data->wake_active);
		raw_spin_unlock(&chip_data->rlock);
	}

	return 0;
}

static void __maybe_unused stm32_exti_h_resume(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_resume(chip_data, chip_data->mask_cache);
		raw_spin_unlock(&chip_data->rlock);
	}
}

static struct syscore_ops stm32_exti_h_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = stm32_exti_h_suspend,
	.resume = stm32_exti_h_resume,
#endif
};

static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
{
	stm32_host_data = host_data;
	register_syscore_ops(&stm32_exti_h_syscore_ops);
}

static void stm32_exti_h_syscore_deinit(void)
{
	unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}

static struct irq_chip stm32_exti_h_chip = {
	.name = "stm32-exti-h",
	.irq_eoi = stm32_exti_h_eoi,
	.irq_mask = stm32_exti_h_mask,
	.irq_unmask = stm32_exti_h_unmask,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_type = stm32_exti_h_set_type,
	.irq_set_wake = stm32_exti_h_set_wake,
	.flags = IRQCHIP_MASK_ON_SUSPEND,
	.irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
};

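/*
 * Allocate a hierarchical EXTI interrupt: bind it to the matching bank's
 * chip data and, when the line maps to a GIC SPI, allocate the parent
 * interrupt as well.
 */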
static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
				     unsigned int virq,
				     unsigned int nr_irqs, void *data)
{
	struct stm32_exti_host_data *host_data = dm->host_data;
	struct stm32_exti_chip_data *chip_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec p_fwspec;
	irq_hw_number_t hwirq;
	int p_irq, bank;

	hwirq = fwspec->param[0];
	bank = hwirq / IRQS_PER_BANK;
	chip_data = &host_data->chips_data[bank];

	irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
				      &stm32_exti_h_chip, chip_data);

	p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
	if (p_irq >= 0) {
		p_fwspec.fwnode = dm->parent->fwnode;
		p_fwspec.param_count = 3;
		p_fwspec.param[0] = GIC_SPI;
		p_fwspec.param[1] = p_irq;
		p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

		return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
	}

	return 0;
}

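/*
 * Allocate the host data and map the EXTI register block described by
 * the device tree node (legacy, non-platform-driver path).
 */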
static struct
stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
					    struct device_node *node)
{
	struct stm32_exti_host_data *host_data;

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return NULL;

	host_data->drv_data = dd;
	host_data->chips_data = kcalloc(dd->bank_nr,
					sizeof(struct stm32_exti_chip_data),
					GFP_KERNEL);
	if (!host_data->chips_data)
		goto free_host_data;

	host_data->base = of_iomap(node, 0);
	if (!host_data->base) {
		pr_err("%pOF: Unable to map registers\n", node);
		goto free_chips_data;
	}

	stm32_host_data = host_data;

	return host_data;

free_chips_data:
	kfree(host_data->chips_data);
free_host_data:
	kfree(host_data);

	return NULL;
}

static struct
stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
					   u32 bank_idx,
					   struct device_node *node)
{
	const struct stm32_exti_bank *stm32_bank;
	struct stm32_exti_chip_data *chip_data;
	void __iomem *base = h_data->base;

	stm32_bank = h_data->drv_data->exti_banks[bank_idx];
	chip_data = &h_data->chips_data[bank_idx];
	chip_data->host_data = h_data;
	chip_data->reg_bank = stm32_bank;

	raw_spin_lock_init(&chip_data->rlock);

	/*
	 * This IP has no reset, so after hot reboot we should
	 * clear registers to avoid residue
	 */
	writel_relaxed(0, base + stm32_bank->imr_ofst);
	writel_relaxed(0, base + stm32_bank->emr_ofst);

	pr_info("%pOF: bank%d\n", node, bank_idx);

	return chip_data;
}

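/*
 * Legacy init path for F4/H7: register a linear domain with one generic
 * chip per bank and chain the parent interrupts listed in the DT node.
 */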
static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
				  struct device_node *node)
{
	struct stm32_exti_host_data *host_data;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int nr_irqs, ret, i;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;

	host_data = stm32_exti_host_init(drv_data, node);
	if (!host_data)
		return -ENOMEM;

	domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
				       &irq_exti_domain_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: Could not register interrupt domain.\n",
		       node);
		ret = -ENOMEM;
		goto out_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
					     handle_edge_irq, clr, 0, 0);
	if (ret) {
		pr_err("%pOF: Could not allocate generic interrupt chip.\n",
		       node);
		goto out_free_domain;
	}

	for (i = 0; i < drv_data->bank_nr; i++) {
		const struct stm32_exti_bank *stm32_bank;
		struct stm32_exti_chip_data *chip_data;

		stm32_bank = drv_data->exti_banks[i];
		chip_data = stm32_exti_chip_init(host_data, i, node);

		gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);

		gc->reg_base = host_data->base;
		gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
		gc->chip_types->chip.irq_ack = stm32_irq_ack;
		gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
		gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
		gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
		gc->suspend = stm32_irq_suspend;
		gc->resume = stm32_irq_resume;
		gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);

		gc->chip_types->regs.mask = stm32_bank->imr_ofst;
		gc->private = (void *)chip_data;
	}

	nr_irqs = of_irq_count(node);
	for (i = 0; i < nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		irq_set_handler_data(irq, domain);
		irq_set_chained_handler(irq, stm32_irq_handler);
	}

	return 0;

out_free_domain:
	irq_domain_remove(domain);
out_unmap:
	iounmap(host_data->base);
	kfree(host_data->chips_data);
	kfree(host_data);
	return ret;
}

static const struct irq_domain_ops stm32_exti_h_domain_ops = {
	.alloc = stm32_exti_h_domain_alloc,
	.free = irq_domain_free_irqs_common,
	.xlate = irq_domain_xlate_twocell,
};

static void stm32_exti_remove_irq(void *data)
{
	struct irq_domain *domain = data;

	irq_domain_remove(domain);
}

static int stm32_exti_remove(struct platform_device *pdev)
{
	stm32_exti_h_syscore_deinit();
	return 0;
}

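/*
 * Platform driver probe (MP1 only): take the optional hwspinlock, map
 * the registers, initialize each bank and build the hierarchical domain
 * on top of the GIC.
 */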
static int stm32_exti_probe(struct platform_device *pdev)
{
	int ret, i;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct irq_domain *parent_domain, *domain;
	struct stm32_exti_host_data *host_data;
	const struct stm32_exti_drv_data *drv_data;
	struct resource *res;

	host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;

	/* check for optional hwspinlock which may not be available yet */
	ret = of_hwspin_lock_get_id(np, 0);
	if (ret == -EPROBE_DEFER)
		/* hwspinlock framework not yet ready */
		return ret;

	if (ret >= 0) {
		host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
		if (!host_data->hwlock) {
			dev_err(dev, "Failed to request hwspinlock\n");
			return -EINVAL;
		}
	} else if (ret != -ENOENT) {
		/* note: ENOENT is a valid case (means 'no hwspinlock') */
		dev_err(dev, "Failed to get hwspinlock\n");
		return ret;
	}

	/* initialize host_data */
	drv_data = of_device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "no of match data\n");
		return -ENODEV;
	}
	host_data->drv_data = drv_data;

	host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
					     sizeof(*host_data->chips_data),
					     GFP_KERNEL);
	if (!host_data->chips_data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(host_data->base)) {
		dev_err(dev, "Unable to map registers\n");
		return PTR_ERR(host_data->base);
	}

	for (i = 0; i < drv_data->bank_nr; i++)
		stm32_exti_chip_init(host_data, i, np);

	parent_domain = irq_find_host(of_irq_find_parent(np));
	if (!parent_domain) {
		dev_err(dev, "GIC interrupt-parent not found\n");
		return -EINVAL;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  drv_data->bank_nr * IRQS_PER_BANK,
					  np, &stm32_exti_h_domain_ops,
					  host_data);

	if (!domain) {
		dev_err(dev, "Could not register exti domain\n");
		return -ENOMEM;
	}

	ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
	if (ret)
		return ret;

	stm32_exti_h_syscore_init(host_data);

	return 0;
}

/* platform driver only for MP1 */
static const struct of_device_id stm32_exti_ids[] = {
	{ .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);

static struct platform_driver stm32_exti_driver = {
	.probe = stm32_exti_probe,
	.remove = stm32_exti_remove,
	.driver = {
		.name = "stm32_exti",
		.of_match_table = stm32_exti_ids,
	},
};

static int __init stm32_exti_arch_init(void)
{
	return platform_driver_register(&stm32_exti_driver);
}

static void __exit stm32_exti_arch_exit(void)
{
	platform_driver_unregister(&stm32_exti_driver);
}

arch_initcall(stm32_exti_arch_init);
module_exit(stm32_exti_arch_exit);

/* no platform driver for F4 and H7 */
static int __init stm32f4_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32f4xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);

static int __init stm32h7_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32h7xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);