// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 *
 */
11
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/wakeup_reason.h>

#include "mtk-eint.h"
24
#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
#define MTK_EINT_DBNC_SET_DBNC_BITS	4
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)
30
/*
 * Default EINT register layout, used when a SoC driver does not supply
 * its own mtk_eint_regs (see mtk_eint_do_init()).  All values are byte
 * offsets from eint->base.
 */
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat = 0x000,		/* pending-interrupt status */
	.ack = 0x040,		/* write 1 to acknowledge */
	.mask = 0x080,		/* mask state: 1 = masked */
	.mask_set = 0x0c0,	/* write 1 to mask a line */
	.mask_clr = 0x100,	/* write 1 to unmask a line */
	.sens = 0x140,		/* sensitivity: 1 = level, 0 = edge */
	.sens_set = 0x180,
	.sens_clr = 0x1c0,
	.soft = 0x200,		/* software-raised interrupt */
	.soft_set = 0x240,
	.soft_clr = 0x280,
	.pol = 0x300,		/* polarity: set = high/rising */
	.pol_set = 0x340,
	.pol_clr = 0x380,
	.dom_en = 0x400,	/* domain enable; set to all 1s in mtk_eint_hw_init() */
	.dbnc_ctrl = 0x500,	/* hardware debounce control */
	.dbnc_set = 0x600,
	.dbnc_clr = 0x700,
};
51
52static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
53 unsigned int eint_num,
54 unsigned int offset)
55{
56 unsigned int eint_base = 0;
57 void __iomem *reg;
58
59 if (eint_num >= eint->hw->ap_num)
60 eint_base = eint->hw->ap_num;
61
62 reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;
63
64 return reg;
65}
66
67static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
68 unsigned int eint_num)
69{
70 unsigned int sens;
71 unsigned int bit = BIT(eint_num % 32);
72 void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
73 eint->regs->sens);
74
75 if (readl(reg) & bit)
76 sens = MTK_EINT_LEVEL_SENSITIVE;
77 else
78 sens = MTK_EINT_EDGE_SENSITIVE;
79
80 if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
81 return 1;
82 else
83 return 0;
84}
85
/*
 * mtk_eint_flip_edge() - emulate dual-edge triggering by flipping polarity
 * @eint:	EINT controller instance
 * @hwirq:	EINT hardware line number
 *
 * The hardware triggers on only one edge, so for IRQ_TYPE_EDGE_BOTH the
 * polarity is armed against the opposite of the current pin level: a
 * high pin arms falling (pol_clr), a low pin arms rising (pol_set).
 * The pin is re-read after each write and the loop repeats if it
 * toggled meanwhile, so the final polarity always matches a level that
 * was observed stable across the write.
 *
 * Returns the pin level the polarity was finally armed against.
 */
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}
110
111static void mtk_eint_mask(struct irq_data *d)
112{
113 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
114 u32 mask = BIT(d->hwirq & 0x1f);
115 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
116 eint->regs->mask_set);
117
118 writel(mask, reg);
119}
120
121static void mtk_eint_unmask(struct irq_data *d)
122{
123 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
124 u32 mask = BIT(d->hwirq & 0x1f);
125 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
126 eint->regs->mask_clr);
127
128 writel(mask, reg);
129
130 if (eint->dual_edge[d->hwirq])
131 mtk_eint_flip_edge(eint, d->hwirq);
132}
133
134static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
135 unsigned int eint_num)
136{
137 unsigned int bit = BIT(eint_num % 32);
138 void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
139 eint->regs->mask);
140
141 return !!(readl(reg) & bit);
142}
143
144static void mtk_eint_ack(struct irq_data *d)
145{
146 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
147 u32 mask = BIT(d->hwirq & 0x1f);
148 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
149 eint->regs->ack);
150
151 writel(mask, reg);
152}
153
154static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
155{
156 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
157 u32 mask = BIT(d->hwirq & 0x1f);
158 void __iomem *reg;
159
160 if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
161 ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
162 dev_err(eint->dev,
163 "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
164 d->irq, d->hwirq, type);
165 return -EINVAL;
166 }
167
168 if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
169 eint->dual_edge[d->hwirq] = 1;
170 else
171 eint->dual_edge[d->hwirq] = 0;
172
173 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
174 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
175 writel(mask, reg);
176 } else {
177 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
178 writel(mask, reg);
179 }
180
181 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
182 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
183 writel(mask, reg);
184 } else {
185 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
186 writel(mask, reg);
187 }
188
189 if (eint->dual_edge[d->hwirq])
190 mtk_eint_flip_edge(eint, d->hwirq);
191
192 return 0;
193}
194
195static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
196{
197 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
198 int shift = d->hwirq & 0x1f;
199 int reg = d->hwirq >> 5;
200
201 if (on)
202 eint->wake_mask[reg] |= BIT(shift);
203 else
204 eint->wake_mask[reg] &= ~BIT(shift);
205
206 return 0;
207}
208
209static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
210 void __iomem *base, u32 *buf)
211{
212 int port;
213 void __iomem *reg;
214
215 for (port = 0; port < eint->hw->ports; port++) {
216 reg = base + (port << 2);
217 writel_relaxed(~buf[port], reg + eint->regs->mask_set);
218 writel_relaxed(buf[port], reg + eint->regs->mask_clr);
219 }
220}
221
222static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
223 void __iomem *base, u32 *buf)
224{
225 int port;
226 void __iomem *reg;
227
228 for (port = 0; port < eint->hw->ports; port++) {
229 reg = base + eint->regs->mask + (port << 2);
230 buf[port] = ~readl_relaxed(reg);
231 /* Mask is 0 when irq is enabled, and 1 when disabled. */
232 }
233}
234
235static int mtk_eint_irq_request_resources(struct irq_data *d)
236{
237 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
238 struct gpio_chip *gpio_c;
239 unsigned int gpio_n;
240 int err;
241
242 err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
243 &gpio_n, &gpio_c);
244 if (err < 0) {
245 dev_err(eint->dev, "Can not find pin\n");
246 return err;
247 }
248
249 err = gpiochip_lock_as_irq(gpio_c, gpio_n);
250 if (err < 0) {
251 dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
252 irqd_to_hwirq(d));
253 return err;
254 }
255
256 err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
257 if (err < 0) {
258 dev_err(eint->dev, "Can not eint mode\n");
259 return err;
260 }
261
262 return 0;
263}
264
265static void mtk_eint_irq_release_resources(struct irq_data *d)
266{
267 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
268 struct gpio_chip *gpio_c;
269 unsigned int gpio_n;
270
271 eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
272 &gpio_c);
273
274 gpiochip_unlock_as_irq(gpio_c, gpio_n);
275}
276
/* irq_chip callbacks backing every EINT virq created in mtk_eint_do_init(). */
static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};
288
289static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
290{
291 void __iomem *reg = eint->base + eint->regs->dom_en;
292 unsigned int i;
293
294 for (i = 0; i < eint->hw->ap_num; i += 32) {
295 writel(0xffffffff, reg);
296 reg += 4;
297 }
298
299 return 0;
300}
301
302static inline void
303mtk_eint_debounce_process(struct mtk_eint *eint, int index)
304{
305 unsigned int rst, ctrl_offset;
306 unsigned int bit, dbnc;
307
308 ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
309 dbnc = readl(eint->base + ctrl_offset);
310 bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
311 if ((bit & dbnc) > 0) {
312 ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
313 rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
314 writel(rst, eint->base + ctrl_offset);
315 }
316}
317
/*
 * Chained handler for the parent interrupt: scan every 32-bit status
 * word, dispatch each pending EINT to its mapped virq, and wrap the
 * dispatch in the software dual-edge emulation (soft-irq replay when an
 * edge would otherwise be lost while the handler ran).
 */
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, index, virq;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	/* One status word per 32 EINT lines. */
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			index = eint_num + offset;
			virq = irq_find_mapping(eint->domain, index);
			status &= ~BIT(offset);

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear soft-irq in case we raised it last
				 * time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				/* Pin level before the handler runs. */
				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_irq(virq);

			if (dual_edge) {
				/* Re-arm polarity against the current level. */
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If level changed, we might lost one edge
				 * interrupt, raised it through soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			/* Restart the HW debounce counter if enabled. */
			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}
372
/*
 * Suspend hook: snapshot the current mask state into cur_mask, then
 * program the hardware with wake_mask so only wake-enabled lines can
 * fire during suspend.  Restored by mtk_eint_do_resume().
 */
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
380
/*
 * Resume hook: restore the mask state saved by mtk_eint_do_suspend(),
 * undoing the wake-only mask programmed for suspend.
 */
int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
387
/*
 * mtk_eint_set_debounce() - program hardware debounce for one EINT line
 * @eint:	EINT controller instance
 * @eint_num:	EINT hardware line number
 * @debounce:	requested debounce time, in microseconds
 *
 * Rounds @debounce up to the nearest step in debounce_time[]; requests
 * above the largest step leave the index at ARRAY_SIZE, which is
 * written into the setting field as-is — presumably the hardware's
 * maximum; confirm against the datasheet.  Returns -EINVAL if the line
 * cannot be debounced (no debounce channel or edge sensitive).
 *
 * NOTE(review): if @eint_num has no virq mapping, irq_get_irq_data()
 * returns NULL and the mask/unmask calls below would dereference it —
 * looks like callers only pass mapped lines; confirm.
 */
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	/* Supported debounce steps, in microseconds. */
	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
						     64000, 128000, 256000};
	struct irq_data *d;

	virq = irq_find_mapping(eint->domain, eint_num);
	/* Four lines per debounce word, 8 bits per line. */
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	/* Pick the smallest step that covers the requested time. */
	dbnc = ARRAY_SIZE(debounce_time);
	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
		if (debounce <= debounce_time[i]) {
			dbnc = i;
			break;
		}
	}

	/* Mask the line while reprogramming; remember to restore it. */
	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	/* Clear the line's whole 8-bit field, then write time + enable. */
	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for hw debounce counter reset
	 * work correctly.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
441
442int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
443{
444 int irq;
445
446 irq = irq_find_mapping(eint->domain, eint_n);
447 if (!irq)
448 return -EINVAL;
449
450 return irq;
451}
452
453static struct mtk_eint *g_eint;
454void mt_eint_log_resume_irq(void)
455{
456 unsigned int status, eint_num;
457 unsigned int offset;
458 const struct mtk_eint_regs *eint_offsets = g_eint->regs;
459 void __iomem *reg_base = mtk_eint_get_offset(g_eint, 0
460 , eint_offsets->stat);
461 unsigned int triggered_eint;
462
463 for (eint_num = 0; eint_num < g_eint->hw->ap_num;
464 reg_base += 4, eint_num += 32) {
465 /* read status register every 32 interrupts */
466 status = readl(reg_base);
467 if (!status)
468 continue;
469
470 while (status) {
471 offset = __ffs(status);
472 triggered_eint = eint_num + offset;
473 pr_info("EINT %d is pending\n", triggered_eint);
474 log_wakeup_reason(irq_find_mapping(g_eint->domain,
475 triggered_eint));
476 status &= ~BIT(offset);
477 }
478 }
479}
480
481
/* Registered in mtk_eint_do_init(); logs wakeup EINTs on every resume. */
static struct syscore_ops mtk_eint_syscore_ops = {
	.resume = mt_eint_log_resume_irq,
};
485
486int mtk_eint_do_init(struct mtk_eint *eint)
487{
488 int i;
489
490 /* If clients don't assign a specific regs, let's use generic one */
491 if (!eint->regs)
492 eint->regs = &mtk_generic_eint_regs;
493
494 eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
495 sizeof(*eint->wake_mask), GFP_KERNEL);
496 if (!eint->wake_mask)
497 return -ENOMEM;
498
499 eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
500 sizeof(*eint->cur_mask), GFP_KERNEL);
501 if (!eint->cur_mask)
502 return -ENOMEM;
503
504 eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
505 sizeof(int), GFP_KERNEL);
506 if (!eint->dual_edge)
507 return -ENOMEM;
508
509 eint->domain = irq_domain_add_linear(eint->dev->of_node,
510 eint->hw->ap_num,
511 &irq_domain_simple_ops, NULL);
512 if (!eint->domain)
513 return -ENOMEM;
514
515 mtk_eint_hw_init(eint);
516 for (i = 0; i < eint->hw->ap_num; i++) {
517 int virq = irq_create_mapping(eint->domain, i);
518
519 irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
520 handle_level_irq);
521 irq_set_chip_data(virq, eint);
522 }
523
524 irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
525 eint);
526 g_eint = eint;
527 register_syscore_ops(&mtk_eint_syscore_ops);
528
529 return 0;
530}