/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
	((data)->plat_data->m4u_plat == M4U_MT8173 ? 5 : 4)
/* It's named F_MMU_TF_PROT_SEL in mt2712. */
#define F_MMU_TF_PROTECT_SEL(prot, data) \
	(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
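
/*
 * A worked example of the encoding above (an illustration, not a register
 * dump): the 4GB-mode setting in mtk_iommu_hw_init() below uses
 * F_MMU_VLD_PA_RNG(7, 4) = (7 << 8) | 4 = 0x704, i.e. end-address
 * bits[32:30] = 7 and start-address bits[32:30] = 4, covering the PA range
 * 0x1_0000_0000..0x1_ffff_ffff.
 */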

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124	/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
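
/*
 * A worked example of the INT_ID decode above (illustrative value, not a
 * captured register dump): regval = 0x1a4 gives
 * larb = (0x1a4 >> 7) & 0x7 = 3 and port = (0x1a4 >> 2) & 0x1f = 9.
 */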

#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
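
/*
 * For example (assuming the usual MTK_M4U_ID(larb, port) packing of
 * ((larb) << 5) | (port) from the dt-binding headers): id = 0x45 decodes
 * to larb 2, port 5.
 */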

/* Reserved IOVA range for the VPU */
#define IOVPU_RANGE_START		0x7DA00000
#define IOVPU_RANGE_LEN			0x4C00000

struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U; for Regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address always needs to be set, and Region 'E'
 * keeps the CPU physical address as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x40000000
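
/*
 * A numeric sketch of the rule above (illustrative addresses): with 4GB
 * mode enabled, a CPU PA of 0x4000_0000 (Region 'B') is written into the
 * pagetable as 0x1_4000_0000 (mtk_iommu_map() below ORs in BIT_ULL(32)),
 * and mtk_iommu_iova_to_phys() sets bit 32 again for any PA it reads back
 * below MTK_IOMMU_4GB_MODE_REMAP_BASE.
 */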

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance.
 *
 * This always returns the mtk_iommu_data of the first probed M4U where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the TLB flush-all is done */
	}
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);
		data->tlb_flush_active = true;
	}
}

static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		/* Avoid timing out if there's nothing to wait for */
		if (!data->tlb_flush_active)
			return;

		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 100000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		data->tlb_flush_active = false;
	}
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};
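
/*
 * A sketch of the call order the io-pgtable core is assumed to follow for
 * a ranged invalidation (the tlb_add_flush/tlb_sync contract; the exact
 * interleaving is up to the caller):
 *
 *	mtk_iommu_tlb_add_flush_nosync(iova, size, granule, leaf, data);
 *	...				(more ranges may be queued)
 *	mtk_iommu_tlb_sync(data);	(polls REG_MMU_CPE_DONE)
 */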

static unsigned int mtk_iommu_get_larbid(const unsigned int *larbid_in_common,
					 const unsigned int fault_larb)
{
	int i;

	for (i = 0; i < MTK_LARB_NR_MAX; i++)
		if (larbid_in_common[i] == fault_larb)
			return i;
	return MTK_LARB_NR_MAX;	/* Invalid larb id. */
}
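
/*
 * For example, with mt8183's larbid_in_common table below
 * ({0, 7, 5, 6, 1, 2, 3, 4}), a hardware-reported fault_larb of 7 maps
 * back to larb 1.
 */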

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	fault_port = F_MMU_INT_ID_PORT_ID(regval);

	if (data->plat_data->larbid_remap_enable)
		fault_larb = mtk_iommu_get_larbid(
					data->plat_data->larbid_in_common,
					fault_larb);
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	if (data->enable_4GB)
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	int ret;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
	    data->plat_data->has_4gb_mode &&
	    data->enable_4GB)
		paddr |= BIT_ULL(32);

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa |= BIT_ULL(32);

	return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct iommu_group *group;

	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	data = dev->iommu_fwspec->iommu_priv;
	iommu_device_link(&data->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_data *data;

	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return;

	data = dev->iommu_fwspec->iommu_priv;
	iommu_device_unlink(&data->iommu, dev);

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->iommu_fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void mtk_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_READ;

	region = iommu_alloc_resv_region(IOVPU_RANGE_START, IOVPU_RANGE_LEN,
					 prot, IOMMU_RESV_RESERVED);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void mtk_iommu_put_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

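/*
 * Note on .pgsize_bitmap below: SZ_4K | SZ_64K | SZ_1M | SZ_16M matches the
 * ARM short-descriptor (v7s) page, large-page, section and supersection
 * sizes provided by the ARM_V7S io-pgtable format selected in
 * mtk_iommu_domain_finalise().
 */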
static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc = mtk_iommu_domain_alloc,
	.domain_free = mtk_iommu_domain_free,
	.attach_dev = mtk_iommu_attach_device,
	.detach_dev = mtk_iommu_detach_device,
	.map = mtk_iommu_map,
	.unmap = mtk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.flush_iotlb_all = mtk_iommu_iotlb_sync,
	.iotlb_sync = mtk_iommu_iotlb_sync,
	.iova_to_phys = mtk_iommu_iova_to_phys,
	.add_device = mtk_iommu_add_device,
	.remove_device = mtk_iommu_remove_device,
	.device_group = mtk_iommu_device_group,
	.of_xlate = mtk_iommu_of_xlate,
	.get_resv_regions = mtk_iommu_get_resv_regions,
	.put_resv_regions = mtk_iommu_put_resv_regions,
	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	enum mtk_iommu_plat m4u_plat = data->plat_data->m4u_plat;
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_TF_PROTECT_SEL(2, data);
	if (m4u_plat == M4U_MT8173)
		regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

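	/*
	 * A reading of the mt8173 IVRP encoding below (an assumption, not
	 * from a datasheet): the register holds protect_base >> 1 with
	 * bit 31 carrying the 4GB-mode flag, e.g. protect_base = 0x7fa00080
	 * with enable_4GB = 1 yields (0x7fa00080 >> 1) | BIT(31) =
	 * 0xbfd00040.
	 */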
	if (m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB && m4u_plat == M4U_MT2712) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bits[32:30].
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	/*
	 * This is the MISC control register whose default value is OK
	 * except on mt8173 and mt8183.
	 */
	if (m4u_plat == M4U_MT8173 || m4u_plat == M4U_MT8183)
		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind = mtk_iommu_bind,
	.unbind = mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory. HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
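
	/*
	 * Sketch of the alignment trick above (illustrative numbers): the
	 * allocation is not assumed to be 128-byte aligned, so a
	 * 2 * MTK_PROTECT_PA_ALIGN buffer always contains a 128-byte
	 * aligned window; e.g. a buffer PA of 0x8000c0a0 rounds up to
	 * 0x8000c100, leaving 160 of the 256 bytes usable.
	 */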

	/* Whether the current dram is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (PTR_ERR(data->bclk) == -ENOENT)
		data->bclk = NULL;
	else if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode))
			continue;

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The id is consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev)
			return -EPROBE_DEFER;
		data->smi_imu.larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static void mtk_iommu_shutdown(struct platform_device *pdev)
{
	mtk_iommu_remove(pdev);
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_range = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_range, base + REG_MMU_VLD_PA_RNG);
	if (dom)
		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat = M4U_MT2712,
	.has_4gb_mode = true,
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat = M4U_MT8173,
	.has_4gb_mode = true,
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat = M4U_MT8183,
	.larbid_remap_enable = true,
	.larbid_in_common = {0, 7, 5, 6, 1, 2, 3, 4},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe = mtk_iommu_probe,
	.remove = mtk_iommu_remove,
	.shutdown = mtk_iommu_shutdown,
	.driver = {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)