// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <mm/mmu_decl.h>

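/*
 * Write-through cached mapping of a physical range; the protection is
 * derived from PAGE_KERNEL via pgprot_cached_wthru().
 */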
void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);

	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

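/*
 * Core of the 32-bit ioremap variants: pick a virtual address for the
 * physical range and map it with the requested protection.
 */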
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
	unsigned long v;
	phys_addr_t p, offset;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16 * 1024 * 1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
	    page_is_ram(__phys_to_pfn(p))) {
		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
			(unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

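	/* Nothing to map for an empty request. */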
	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		return (void __iomem *)v + offset;

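	/*
	 * vmalloc is up: let the generic powerpc ioremap helper allocate
	 * a virtual area and install the page tables.
	 */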
	if (slab_is_available())
		return do_ioremap(p, offset, size, prot, caller);

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

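	/*
	 * Early boot: carve the mapping out of the region below IOREMAP_TOP
	 * and move ioremap_bot down past it.
	 */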
	err = early_ioremap_range(ioremap_bot - size, p, size, prot);
	if (err)
		return NULL;
	ioremap_bot -= size;

	return (void __iomem *)ioremap_bot + offset;
}

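/*
 * Tear down a mapping created by ioremap(); BAT-covered ranges have no
 * page tables to remove.
 */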
void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_block_mapped((unsigned long)addr))
		return;

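	/*
	 * Only addresses above high_memory and below ioremap_bot can have
	 * come from do_ioremap(); early mappings sit at or above ioremap_bot
	 * and have no vmap area to free.
	 */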
	if (addr > high_memory && (unsigned long)addr < ioremap_bot)
		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);