// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

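/*
 * Find a static mapping whose physical range covers [paddr, paddr + size)
 * with a matching memory type, or return NULL if there is none.
 */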
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

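/*
 * Find the static mapping that contains the virtual address vaddr.
 * static_vmlist is sorted by virtual address, so the walk can stop as
 * soon as we pass it.
 */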
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is in ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

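/*
 * Register a static mapping during early boot: add the vm_struct to the
 * early vmalloc area list and insert svm into static_vmlist, keeping the
 * list sorted by virtual address.
 */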
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

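/*
 * Map a single page at virt to the physical address phys, using the page
 * protections of the given memory type.
 */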
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

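/*
 * Bring the vmalloc area of mm's page tables up to date by copying the
 * kernel's PGD entries for the vmalloc range, retrying until
 * init_mm.context.vmalloc_seq stops changing underneath us.
 */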
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

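/*
 * Map the region with 1MB section entries.  On non-LPAE ARM a Linux PMD
 * covers 2MB, so both halves (two hardware sections) are written per
 * iteration.
 */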
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

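/*
 * Map the region with 16MB supersection entries.  Each supersection is
 * replicated across 16 consecutive 1MB section slots, with physical
 * address bits [35:32] carried in descriptor bits [23:20].
 */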
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

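/*
 * Core remap worker: reuse a covering static mapping when one exists,
 * otherwise grab a vmalloc area and map it with supersections, sections
 * or individual pages, depending on alignment and CPU support.
 */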
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

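/*
 * Tear down a mapping created by the ioremap family.  Static mappings are
 * left untouched; section-based mappings have their PMD entries cleared by
 * hand before the vmalloc area is released, since the generic VM code
 * cannot handle them.
 */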
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

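/*
 * Map a 64K window of PCI I/O space at PCI_IO_VIRT_BASE + offset using the
 * currently selected PCI I/O memory type.
 */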
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

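/*
 * Tell the generic memremap() code whether this range is mapped system RAM,
 * in which case the existing kernel mapping may be reused rather than
 * creating a new one.
 */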
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(PFN_PHYS(pfn));
}

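/*
 * Out-of-tree helper (not part of the upstream ioremap.c): translate a
 * kernel virtual address to a physical address with the CP15 ATS1CPR
 * (stage 1 privileged-read) address translation operation.  TTBR0 is
 * temporarily pointed at swapper_pg_dir so that the kernel page tables
 * are walked, the result is read back from the PAR, and TTBR0 is then
 * restored.  PAR bit 0 set means the translation faulted, in which case
 * the virtual address is returned unchanged.
 */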
DEFINE_SPINLOCK(addr_map_lock);
unsigned int mv_cp_virtual_to_physical(unsigned int va)
{
	unsigned int pa;
	unsigned int old_bp = 0;
	unsigned long flags;

	spin_lock_irqsave(&addr_map_lock, flags);

	/* Save the current TTBR0 and point it at the kernel page tables. */
	__asm__ __volatile__ ("mrc p15, 0x0, %0, c2, c0, 0\n" : "=r" (old_bp) : : "memory");
	__asm__ __volatile__ ("mcr p15, 0x0, %0, c2, c0, 0\n" : : "r" (__pa(swapper_pg_dir)) : "memory");

	/* ATS1CPR: translate va as a privileged read, then read the PAR. */
	__asm__ __volatile__ ("mcr p15, 0x0, %0, c7, c8, 0\nisb\n" : : "r" (va) : "memory");
	__asm__ __volatile__ ("mrc p15, 0x0, %0, c7, c4, 0\n" : "=r" (pa) : : "memory");

	/* Restore the original TTBR0. */
	__asm__ __volatile__ ("mcr p15, 0x0, %0, c2, c0, 0\n" : : "r" (old_bp) : "memory");

	spin_unlock_irqrestore(&addr_map_lock, flags);

	if ((pa & 1) == 0)
		pa = (pa & ~0xfff) | (va & 0xfff);
	else
		pa = va; /* translation failed */

	return pa;
}
EXPORT_SYMBOL_GPL(mv_cp_virtual_to_physical);