// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

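/*
 * Register map implied by the macros below: CHANCTRL at offset 0x0,
 * followed by mask, set and status banks of 't' 32-bit words each
 * (one word per group of 32 input interrupts), with CHAN_MINTDIS and
 * CHAN_MASTRSTAT placed after the three banks.
 */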
#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
#define CHANCTRL		0x0
#define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
#define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
#define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)

#define CHAN_MAX_OUTPUT_INT	0x8

struct irqsteer_data {
	void __iomem		*regs;
	struct clk		*ipg_clk;
	int			irq[CHAN_MAX_OUTPUT_INT];
	int			irq_count;
	raw_spinlock_t		lock;
	int			reg_num;
	int			channel;
	struct irq_domain	*domain;
	u32			*saved_reg;
	struct device		*dev;
};

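/*
 * The mask/set/status banks are indexed in reverse: the highest-numbered
 * word of a bank covers hwirqs 0..31, hence the reg_num - irqnum / 32 - 1
 * computation below.
 */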
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
				      unsigned long irqnum)
{
	return (data->reg_num - irqnum / 32 - 1);
}

static void imx_irqsteer_irq_unmask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val |= BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

static void imx_irqsteer_irq_mask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val &= ~BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

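/*
 * irq_bus_lock/irq_bus_sync_unlock bracket the mask/unmask callbacks in
 * sleepable context; here they are used to hold a runtime PM reference
 * (and thus keep the ipg clock enabled) while the CHANMASK registers
 * are accessed.
 */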
static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;

	pm_runtime_get_sync(data->dev);
}

static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;

	pm_runtime_put_autosuspend(data->dev);
}

static const struct irq_chip imx_irqsteer_irq_chip = {
	.name			= "irqsteer",
	.irq_mask		= imx_irqsteer_irq_mask,
	.irq_unmask		= imx_irqsteer_irq_unmask,
	.irq_bus_lock		= imx_irqsteer_irq_bus_lock,
	.irq_bus_sync_unlock	= imx_irqsteer_irq_bus_sync_unlock,
};

static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_data(irq, h->host_data);
	irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops imx_irqsteer_domain_ops = {
	.map	= imx_irqsteer_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

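/*
 * Each chained output interrupt services a group of 64 inputs, so the
 * hwirq base for a given parent irq is its index times 64.
 */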
static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
{
	int i;

	for (i = 0; i < data->irq_count; i++) {
		if (data->irq[i] == irq)
			return i * 64;
	}

	return -EINVAL;
}

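/*
 * Chained handler for one output interrupt: scan the two 32-bit status
 * words belonging to its group of 64 inputs and dispatch every set bit
 * to the mapped virq.
 */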
static void imx_irqsteer_irq_handler(struct irq_desc *desc)
{
	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
	int hwirq;
	int irq, i;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	irq = irq_desc_get_irq(desc);
	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
	if (hwirq < 0) {
		pr_warn("%s: unable to get hwirq base for irq %d\n",
			__func__, irq);
		return;
	}

	for (i = 0; i < 2; i++, hwirq += 32) {
		int idx = imx_irqsteer_get_reg_index(data, hwirq);
		unsigned long irqmap;
		int pos, virq;

		if (hwirq >= data->reg_num * 32)
			break;

		irqmap = readl_relaxed(data->regs +
				       CHANSTATUS(idx, data->reg_num));

		for_each_set_bit(pos, &irqmap, 32) {
			virq = irq_find_mapping(data->domain, pos + hwirq);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int imx_irqsteer_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct irqsteer_data *data;
	u32 irqs_num;
	int i, ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = &pdev->dev;
	data->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->regs)) {
		dev_err(&pdev->dev, "failed to initialize reg\n");
		return PTR_ERR(data->regs);
	}

	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(data->ipg_clk)) {
		ret = PTR_ERR(data->ipg_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
		return ret;
	}

	raw_spin_lock_init(&data->lock);

	ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
	if (ret)
		return ret;
	ret = of_property_read_u32(np, "fsl,channel", &data->channel);
	if (ret)
		return ret;

	/*
	 * There is one output irq for each group of 64 inputs.
	 * One register bit map can represent 32 input interrupts.
	 */
	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
	data->reg_num = irqs_num / 32;

	if (IS_ENABLED(CONFIG_PM)) {
		data->saved_reg = devm_kzalloc(&pdev->dev,
					       sizeof(u32) * data->reg_num,
					       GFP_KERNEL);
		if (!data->saved_reg)
			return -ENOMEM;
	}

	ret = clk_prepare_enable(data->ipg_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}

	/* steer all IRQs into configured channel */
	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);

	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
					     &imx_irqsteer_domain_ops, data);
	if (!data->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto out;
	}
	irq_domain_set_pm_device(data->domain, &pdev->dev);

	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < data->irq_count; i++) {
		data->irq[i] = irq_of_parse_and_map(np, i);
		if (!data->irq[i]) {
			ret = -EINVAL;
			goto out;
		}

		irq_set_chained_handler_and_data(data->irq[i],
						 imx_irqsteer_irq_handler,
						 data);
	}

	platform_set_drvdata(pdev, data);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
out:
	clk_disable_unprepare(data->ipg_clk);
	return ret;
}

static int imx_irqsteer_remove(struct platform_device *pdev)
{
	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < irqsteer_data->irq_count; i++)
		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
						 NULL, NULL);

	irq_domain_remove(irqsteer_data->domain);

	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

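/*
 * Only the per-channel mask registers need to be preserved across a
 * runtime suspend: they are saved before the ipg clock is disabled and
 * written back (together with CHANCTRL) once the clock is re-enabled.
 */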
#ifdef CONFIG_PM
static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
	int i;

	for (i = 0; i < data->reg_num; i++)
		data->saved_reg[i] = readl_relaxed(data->regs +
						   CHANMASK(i, data->reg_num));
}

static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
{
	int i;

	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
	for (i = 0; i < data->reg_num; i++)
		writel_relaxed(data->saved_reg[i],
			       data->regs + CHANMASK(i, data->reg_num));
}

static int imx_irqsteer_suspend(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);

	imx_irqsteer_save_regs(irqsteer_data);
	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

static int imx_irqsteer_resume(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(irqsteer_data->ipg_clk);
	if (ret) {
		dev_err(dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}
	imx_irqsteer_restore_regs(irqsteer_data);

	return 0;
}
#endif

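/*
 * Runtime PM uses the suspend/resume helpers above directly; system
 * sleep goes through pm_runtime_force_suspend/resume in the noirq phase
 * so the same save/restore path is reused.
 */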
static const struct dev_pm_ops imx_irqsteer_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(imx_irqsteer_suspend,
			   imx_irqsteer_resume, NULL)
};

static const struct of_device_id imx_irqsteer_dt_ids[] = {
	{ .compatible = "fsl,imx-irqsteer", },
	{},
};

static struct platform_driver imx_irqsteer_driver = {
	.driver = {
		.name		= "imx-irqsteer",
		.of_match_table	= imx_irqsteer_dt_ids,
		.pm		= &imx_irqsteer_pm_ops,
	},
	.probe = imx_irqsteer_probe,
	.remove = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);