#include <linux/kernel.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

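/*
 * Page directory holding the 1:1 (identity) mapping of the .idmap.text
 * section; setup_mm_for_reboot() switches to it so execution stays
 * predictable while the MMU is being turned off.
 */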
pgd_t *idmap_pgd;

#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

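	/*
	 * Allocate a private pmd when this pud slot is empty or still
	 * refers to the swapper_pg_dir tables (L_PGD_SWAPPER), so the
	 * identity entries never modify the shared kernel page tables.
	 */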
	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

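	/*
	 * With the classic 2-level tables a Linux PMD covers 2MB and is
	 * backed by two 1MB hardware section entries, so write both
	 * halves before flushing.
	 */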
	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */

static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}

static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
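	/*
	 * ARMv5 and earlier cores (except XScale) require bit 4 set in
	 * section descriptors.
	 */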
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

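/* Bounds of the .idmap.text section, provided by the linker script. */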
extern char __idmap_text_start[], __idmap_text_end[];

static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	return 0;
}
early_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
	/* Clean and invalidate L1. */
	flush_cache_all();

	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Flush the TLB. */
	local_flush_tlb_all();
}