// SPDX-License-Identifier: GPL-2.0
/*
 * Xtensa MMU support
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

#if defined(CONFIG_HIGHMEM)
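/*
 * Allocate and wire up the PTE pages needed to map n_pages pages of
 * virtual address space starting at vaddr.  Returns a pointer to the
 * first PTE so the caller can index the new tables directly.
 */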
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

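	/*
	 * Each pmd entry points at a full page of PTEs, so round the
	 * request up to a whole number of PTE pages.
	 */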
	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

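	/*
	 * Grab page-aligned low memory for the PTE pages; this early in
	 * boot, memblock is the only allocator available.
	 */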
	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

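	/* Start from a clean slate: clear every newly allocated PTE. */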
	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

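	/*
	 * Point successive pmd entries at successive pages of the new
	 * PTE array; the pmd stores the kernel virtual address of its
	 * PTE page.  The BUG_ON()s check that each slot was empty and
	 * that pte_offset_kernel() agrees with this layout.
	 */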
	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}

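/* Populate page tables for the fixed-address (fixmap) region. */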
static void __init fixedrange_init(void)
{
	init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
}
#endif

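/*
 * Set up the kernel page tables.  Only HIGHMEM needs real work here:
 * the fixmap and the persistent-kmap (PKMAP) windows each get their
 * own PTE pages, and the kmap machinery is initialized.
 */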
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
	kmap_init();
#endif
}

/*
 * Flush the MMU and reset the associated registers to their default
 * values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in those registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
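	/* (Re)establish the KIO mapping and drop any stale TLB entries. */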
	init_kio();
	local_flush_tlb_all();

	/* Set the RASID register (the per-ring ASIDs) to a known value. */
	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/*
	 * Set the PTEVADDR special register to the start of the page
	 * table, which is in kernel-mappable space (i.e. not statically
	 * mapped).  This register's value is undefined on reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

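/*
 * Install the cached and uncached (bypass) mappings for the kernel I/O
 * region.  The TLB entry value is the target physical address plus the
 * cache-attribute bits; the low bits of the virtual-address argument
 * select the hardware TLB way (way 6, the way used at boot for the
 * 256 MB KIO mappings).
 */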
void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed.
	 */
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}