/*
 * Copyright (c) 2015 Stefan Kristiansson
 * Based on arch/arm/arm/mmu.c
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25#include <trace.h>
26#include <debug.h>
27#include <err.h>
28#include <string.h>
29#include <arch/mmu.h>
30#include <arch/or1k.h>
31#include <arch/or1k/mmu.h>
32#include <kernel/vm.h>
33
34#define LOCAL_TRACE 0
35
36#if WITH_KERNEL_VM
37
/* Level-1 kernel translation table: one 32-bit entry per SECTION_SIZE
 * region of the 32-bit virtual address space (256 entries, indexed by
 * vaddr / SECTION_SIZE below).  Placed in .bss.prebss so it is usable
 * before the regular bss is cleared during early boot.
 * NOTE(review): the 8192-byte alignment presumably matches what the TLB
 * miss handler / hardware expects for the table base -- confirm against
 * the exception code. */
uint32_t or1k_kernel_translation_table[256] __ALIGNED(8192) __SECTION(".bss.prebss.translation_table");
39
/* Pessimistic tlb invalidation, which rather invalidate too much.
 * TODO: make it more precise. */
/*
 * Invalidate any DTLB/ITLB entries that could translate the 'count'
 * pages starting at 'vaddr'.  For each page the matching set index is
 * computed, and the match register (MR) of every way in that set is
 * cleared -- invalidating whatever translation happened to live there,
 * hence "pessimistic".
 */
void or1k_invalidate_tlb(vaddr_t vaddr, uint count)
{
    /* TLB geometry is discovered from the MMU configuration registers:
     * NTW encodes (number of ways - 1), NTS encodes log2(number of sets). */
    uint32_t dmmucfgr = mfspr(OR1K_SPR_SYS_DMMUCFGR_ADDR);
    uint32_t immucfgr = mfspr(OR1K_SPR_SYS_IMMUCFGR_ADDR);
    uint32_t num_dtlb_ways = OR1K_SPR_SYS_DMMUCFGR_NTW_GET(dmmucfgr) + 1;
    uint32_t num_dtlb_sets = 1 << OR1K_SPR_SYS_DMMUCFGR_NTS_GET(dmmucfgr);
    uint32_t num_itlb_ways = OR1K_SPR_SYS_IMMUCFGR_NTW_GET(immucfgr) + 1;
    uint32_t num_itlb_sets = 1 << OR1K_SPR_SYS_IMMUCFGR_NTS_GET(immucfgr);
    uint32_t offs;

    for (; count; count--) {
        /* DTLB set index for this page */
        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_dtlb_sets-1);
        /* NOTE(review): the switches assume at most 4 ways; a
         * configuration reporting more would be silently ignored.
         * The fallthroughs are intentional: clear way N-1 down to 0. */
        switch (num_dtlb_ways) {
            case 4:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(3, offs), 0);
                /* fallthrough */
            case 3:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(2, offs), 0);
                /* fallthrough */
            case 2:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(1, offs), 0);
                /* fallthrough */
            case 1:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(0, offs), 0);
        }

        /* ITLB set index for this page (geometry may differ from DTLB) */
        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_itlb_sets-1);
        switch (num_itlb_ways) {
            case 4:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(3, offs), 0);
                /* fallthrough */
            case 3:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(2, offs), 0);
                /* fallthrough */
            case 2:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(1, offs), 0);
                /* fallthrough */
            case 1:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(0, offs), 0);
        }
        vaddr += PAGE_SIZE;
    }
}
79
80status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
81{
82 uint index = vaddr / SECTION_SIZE;
83 uint32_t pte = or1k_kernel_translation_table[index];
84 uint32_t vmask = SECTION_SIZE-1;
85
86 if (!(pte & OR1K_MMU_PG_PRESENT))
87 return ERR_NOT_FOUND;
88
89 /* not a l1 entry */
90 if (!(pte & OR1K_MMU_PG_L)) {
91 uint32_t *l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
92 index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
93 pte = l2_table[index];
94 vmask = PAGE_SIZE-1;
95 }
96
97 if (paddr)
98 *paddr = (pte & ~OR1K_MMU_PG_FLAGS_MASK) | (vaddr & vmask);
99
100 if (flags) {
101 *flags = 0;
102 if (pte & OR1K_MMU_PG_U)
103 *flags |= ARCH_MMU_FLAG_PERM_USER;
104 if (!(pte & OR1K_MMU_PG_X))
105 *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
106 if (!(pte & OR1K_MMU_PG_W))
107 *flags |= ARCH_MMU_FLAG_PERM_RO;
108 if (pte & OR1K_MMU_PG_CI)
109 *flags |= ARCH_MMU_FLAG_UNCACHED;
110 }
111
112 return NO_ERROR;
113}
114
/*
 * Unmap 'count' pages starting at 'vaddr' from the kernel translation
 * table.  Only full, section-aligned ranges backed by l1 section entries
 * can currently be unmapped; pages that are not mapped at all are
 * silently skipped.
 *
 * Returns the number of pages actually unmapped (skipped-over holes are
 * not counted), or ERR_INVALID_ARGS if vaddr is not page aligned.
 * Panics (PANIC_UNIMPLEMENTED) on l2-backed or partial-section ranges.
 */
int arch_mmu_unmap(vaddr_t vaddr, uint count)
{
    LTRACEF("vaddr = 0x%x, count = %d\n", vaddr, count);

    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    uint unmapped = 0;
    while (count) {
        uint index = vaddr / SECTION_SIZE;
        uint32_t pte = or1k_kernel_translation_table[index];
        /* hole in the address space: advance one page and keep going */
        if (!(pte & OR1K_MMU_PG_PRESENT)) {
            vaddr += PAGE_SIZE;
            count--;
            continue;
        }
        /* Unmapping of l2 tables is not implemented (yet), nor are
         * partial section unmaps (vaddr not section aligned, or fewer
         * pages remaining than a full section). */
        if (!(pte & OR1K_MMU_PG_L) || !IS_ALIGNED(vaddr, SECTION_SIZE) || count < SECTION_SIZE / PAGE_SIZE)
            PANIC_UNIMPLEMENTED;

        /* clear the section entry, then flush every page it covered */
        or1k_kernel_translation_table[index] = 0;
        or1k_invalidate_tlb(vaddr, SECTION_SIZE / PAGE_SIZE);
        vaddr += SECTION_SIZE;
        count -= SECTION_SIZE / PAGE_SIZE;
        unmapped += SECTION_SIZE / PAGE_SIZE;
    }

    return unmapped;
}
144
/*
 * Map 'count' pages of physical memory at 'paddr' to virtual address
 * 'vaddr' with the given ARCH_MMU_FLAG_* permissions.
 *
 * Section-aligned, section-sized chunks are mapped directly with l1
 * section entries; everything else goes through l2 page tables, which
 * are allocated on demand from the PMM.
 *
 * Returns the number of pages successfully mapped (which may be less
 * than 'count' if a page-table allocation fails mid-way), or
 * ERR_INVALID_ARGS if vaddr or paddr is not page aligned.
 */
int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
{
    uint l1_index;
    uint32_t pte;
    uint32_t arch_flags = 0;

    LTRACEF("vaddr = 0x%x, paddr = 0x%x, count = %d, flags = 0x%x\n", vaddr, paddr, count, flags);

    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    /* translate generic arch_mmu flags to the hardware PTE bits;
     * note X and W are permission-granting bits, so they are set when
     * the corresponding NO_EXECUTE/RO flag is absent */
    if (flags & ARCH_MMU_FLAG_PERM_USER)
        arch_flags |= OR1K_MMU_PG_U;
    if (!(flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE))
        arch_flags |= OR1K_MMU_PG_X;
    if (flags & ARCH_MMU_FLAG_CACHE_MASK)
        arch_flags |= OR1K_MMU_PG_CI;
    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
        arch_flags |= OR1K_MMU_PG_W;

    uint mapped = 0;
    while (count) {
        l1_index = vaddr / SECTION_SIZE;
        /* fast path: map a whole section with a single l1 entry when
         * both addresses are section aligned and enough pages remain */
        if (IS_ALIGNED(vaddr, SECTION_SIZE) && IS_ALIGNED(paddr, SECTION_SIZE) && count >= SECTION_SIZE / PAGE_SIZE) {
            or1k_kernel_translation_table[l1_index] = (paddr & ~(SECTION_SIZE-1)) | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
            continue;
        }

        uint32_t *l2_table;

        pte = or1k_kernel_translation_table[l1_index];

        /* FIXME: l1 already mapped as a section */
        if (pte & OR1K_MMU_PG_PRESENT && pte & OR1K_MMU_PG_L)
            PANIC_UNIMPLEMENTED;

        if (pte & OR1K_MMU_PG_PRESENT) {
            /* l2 table already exists for this section; reuse it */
            l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
            LTRACEF("l2_table at %p\n", l2_table);
        } else {
            /* allocate and install a fresh l2 table */
            l2_table = pmm_alloc_kpage();
            if (!l2_table) {
                TRACEF("failed to allocate pagetable\n");
                /* partial success: report how many pages made it in */
                return mapped;
            }

            memset(l2_table, 0, PAGE_SIZE);
            paddr_t l2_pa = kvaddr_to_paddr(l2_table);
            LTRACEF("allocated pagetable at %p, pa 0x%lx\n", l2_table, l2_pa);
            /* NOTE(review): the l1 entry inherits arch_flags from the
             * mapping that allocates the table; a later map into the same
             * section with different flags does not update the l1 entry.
             * Presumably per-page checks use the l2 entry's flags, so the
             * l1 bits are only advisory -- confirm against the hardware
             * page-walk / tlb-miss handler. */
            or1k_kernel_translation_table[l1_index] = l2_pa | arch_flags | OR1K_MMU_PG_PRESENT;
        }

        uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;

        LTRACEF("l2_index = 0x%x, vaddr = 0x%x, paddr = 0x%x\n", l2_index, vaddr, paddr);
        l2_table[l2_index] = paddr | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;

        count--;
        mapped++;
        vaddr += PAGE_SIZE;
        paddr += PAGE_SIZE;
    }

    return mapped;
}
214
215#endif /* WITH_KERNEL_VM */