/*
 * Copyright (c) 2014 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

23
24#include <arch/arm64/mmu.h>
25#include <assert.h>
26#include <debug.h>
27#include <err.h>
28#include <kernel/vm.h>
29#include <lib/heap.h>
30#include <stdlib.h>
31#include <string.h>
32#include <sys/types.h>
33#include <trace.h>
34
35#define LOCAL_TRACE 0
36
/*
 * Compile-time sanity checks: the kernel virtual address space must sit at
 * the very top of the address range (sign-extending KERNEL_BASE /
 * KERNEL_ASPACE_BASE by the size shift must yield all ones), and the
 * configured size shift must stay within the supported 25..48-bit window.
 */
STATIC_ASSERT(((long)KERNEL_BASE >> MMU_KERNEL_SIZE_SHIFT) == -1);
STATIC_ASSERT(((long)KERNEL_ASPACE_BASE >> MMU_KERNEL_SIZE_SHIFT) == -1);
STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT <= 48);
STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT >= 25);

/* the main (top-level) kernel translation table; aligned as required by
 * TTBR1 (table size in bytes = entries * 8) and placed in prebss so it is
 * usable before BSS is cleared during early boot */
pte_t arm64_kernel_translation_table[MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP] __ALIGNED(MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP * 8) __SECTION(".bss.prebss.translation_table");
44
45/* convert user level mmu flags to flags that go in L1 descriptors */
46static pte_t mmu_flags_to_pte_attr(uint flags)
47{
48 pte_t attr = MMU_PTE_ATTR_AF;
49
50 switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
51 case ARCH_MMU_FLAG_CACHED:
52 attr |= MMU_PTE_ATTR_NORMAL_MEMORY | MMU_PTE_ATTR_SH_INNER_SHAREABLE;
53 break;
54 case ARCH_MMU_FLAG_UNCACHED:
55 attr |= MMU_PTE_ATTR_STRONGLY_ORDERED;
56 break;
57 case ARCH_MMU_FLAG_UNCACHED_DEVICE:
58 attr |= MMU_PTE_ATTR_DEVICE;
59 break;
60 default:
61 /* invalid user-supplied flag */
62 DEBUG_ASSERT(1);
63 return ERR_INVALID_ARGS;
64 }
65
66 switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
67 case 0:
68 attr |= MMU_PTE_ATTR_AP_P_RW_U_NA;
69 break;
70 case ARCH_MMU_FLAG_PERM_RO:
71 attr |= MMU_PTE_ATTR_AP_P_RO_U_NA;
72 break;
73 case ARCH_MMU_FLAG_PERM_USER:
74 attr |= MMU_PTE_ATTR_AP_P_RW_U_RW;
75 break;
76 case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
77 attr |= MMU_PTE_ATTR_AP_P_RO_U_RO;
78 break;
79 }
80
81 if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
82 attr |= MMU_PTE_ATTR_UXN | MMU_PTE_ATTR_PXN;
83 }
84
85 if (flags & ARCH_MMU_FLAG_NS) {
86 attr |= MMU_PTE_ATTR_NON_SECURE;
87 }
88
89 return attr;
90}
91
/*
 * Look up the mapping for a kernel virtual address.
 *
 * Walks the kernel translation tables starting from the top-level table.
 * On success returns 0 and, when the out-pointers are non-NULL, stores the
 * translated physical address in *paddr and the decoded ARCH_MMU_FLAG_*
 * bits in *flags.  Returns ERR_INVALID_ARGS for addresses below the kernel
 * address space base and ERR_NOT_FOUND when the walk hits an invalid
 * descriptor.
 */
status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
{
    uint index;
    uint index_shift;
    pte_t pte;
    pte_t pte_addr;
    uint descriptor_type;
    pte_t *page_table;
    /* kernel addresses occupy the top MMU_KERNEL_SIZE_SHIFT bits of VA space */
    vaddr_t kernel_base = ~0UL << MMU_KERNEL_SIZE_SHIFT;
    vaddr_t vaddr_rem;

    if (vaddr < kernel_base) {
        TRACEF("vaddr 0x%lx < base 0x%lx\n", vaddr, kernel_base);
        return ERR_INVALID_ARGS;
    }

    index_shift = MMU_KERNEL_TOP_SHIFT;
    page_table = arm64_kernel_translation_table;

    /* bounds-check the top-level index before entering the walk */
    vaddr_rem = vaddr - kernel_base;
    index = vaddr_rem >> index_shift;
    ASSERT(index < MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP);

    /* descend one table level per iteration until a block/page entry */
    while (true) {
        index = vaddr_rem >> index_shift;
        vaddr_rem -= (vaddr_t)index << index_shift;
        pte = page_table[index];
        descriptor_type = pte & MMU_PTE_DESCRIPTOR_MASK;
        pte_addr = pte & MMU_PTE_OUTPUT_ADDR_MASK;

        LTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
                vaddr, index, index_shift, vaddr_rem, pte);

        if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID)
            return ERR_NOT_FOUND;

        /* a block entry (levels 0-2) or page entry (level 3) terminates
         * the walk; the same descriptor encoding means "table" above L3 */
        if (descriptor_type == ((index_shift > MMU_KERNEL_PAGE_SIZE_SHIFT) ?
                                MMU_PTE_L012_DESCRIPTOR_BLOCK :
                                MMU_PTE_L3_DESCRIPTOR_PAGE)) {
            break;
        }

        if (index_shift <= MMU_KERNEL_PAGE_SIZE_SHIFT ||
            descriptor_type != MMU_PTE_L012_DESCRIPTOR_TABLE) {
            PANIC_UNIMPLEMENTED;
        }

        page_table = paddr_to_kvaddr(pte_addr);
        /* each level resolves (page_size_shift - 3) VA bits */
        index_shift -= MMU_KERNEL_PAGE_SIZE_SHIFT - 3;
    }

    /* physical address = output address of final entry + offset within it */
    if (paddr)
        *paddr = pte_addr + vaddr_rem;
    if (flags) {
        /* decode attribute bits back into generic arch mmu flags */
        *flags = 0;
        if (pte & MMU_PTE_ATTR_NON_SECURE)
            *flags |= ARCH_MMU_FLAG_NS;
        switch (pte & MMU_PTE_ATTR_ATTR_INDEX_MASK) {
        case MMU_PTE_ATTR_STRONGLY_ORDERED:
            *flags |= ARCH_MMU_FLAG_UNCACHED;
            break;
        case MMU_PTE_ATTR_DEVICE:
            *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
            break;
        case MMU_PTE_ATTR_NORMAL_MEMORY:
            /* cached is the default; no flag bit */
            break;
        default:
            PANIC_UNIMPLEMENTED;
        }
        switch (pte & MMU_PTE_ATTR_AP_MASK) {
        case MMU_PTE_ATTR_AP_P_RW_U_NA:
            break;
        case MMU_PTE_ATTR_AP_P_RW_U_RW:
            *flags |= ARCH_MMU_FLAG_PERM_USER;
            break;
        case MMU_PTE_ATTR_AP_P_RO_U_NA:
            *flags |= ARCH_MMU_FLAG_PERM_RO;
            break;
        case MMU_PTE_ATTR_AP_P_RO_U_RO:
            *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
            break;
        }
        /* report no-execute only when both UXN and PXN are set, mirroring
         * how mmu_flags_to_pte_attr encodes the flag */
        if ((pte & MMU_PTE_ATTR_UXN) && (pte & MMU_PTE_ATTR_PXN)) {
            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
        }
    }
    LTRACEF("va 0x%lx, paddr 0x%lx, flags 0x%x\n",
            vaddr, paddr ? *paddr : ~0UL, flags ? *flags : ~0U);
    return 0;
}
182
183static int alloc_page_table(paddr_t *paddrp, uint page_size_shift)
184{
185 size_t ret;
186 size_t count;
187 size_t size = 1U << page_size_shift;
188 void *vaddr;
189
190 if (size >= PAGE_SIZE) {
191 count = size / PAGE_SIZE;
192 ret = pmm_alloc_contiguous(count, page_size_shift, paddrp, NULL);
193 if (ret != count)
194 return ERR_NO_MEMORY;
195 } else {
196 vaddr = memalign(size, size);
197 if (!vaddr)
198 return ERR_NO_MEMORY;
199 ret = arch_mmu_query((vaddr_t)vaddr, paddrp, NULL);
200 if (ret) {
201 free(vaddr);
202 return ret;
203 }
204 }
205 return 0;
206}
207
/*
 * Release the backing memory of a page table previously obtained from
 * alloc_page_table(): pmm pages for tables of at least PAGE_SIZE, heap
 * memory otherwise.
 */
static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift)
{
    vm_page_t *address_to_page(paddr_t addr); /* TODO: remove */

    size_t size = 1U << page_size_shift;
    vm_page_t *page;

    if (size >= PAGE_SIZE) {
        page = address_to_page(paddr);
        if (!page)
            panic("bad page table paddr 0x%lx\n", paddr);
        /* NOTE(review): frees a single vm_page_t; assumes this path only
         * sees one-page tables -- confirm against alloc_page_table(),
         * which can allocate multiple contiguous pages when
         * size > PAGE_SIZE */
        pmm_free_page(page);
    } else {
        /* heap-backed table; vaddr is the pointer memalign returned */
        free(vaddr);
    }
}
224
/*
 * Return the kernel virtual address of the next-level page table referenced
 * by page_table[index], allocating and installing a new zeroed table when
 * the slot is currently invalid.
 *
 * Returns NULL if allocation fails or the slot already holds a block
 * mapping (which cannot be descended into or split here).
 */
static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_t *page_table)
{
    pte_t pte;
    paddr_t paddr;
    void *vaddr;
    int ret;

    pte = page_table[index];
    switch (pte & MMU_PTE_DESCRIPTOR_MASK) {
    case MMU_PTE_DESCRIPTOR_INVALID:
        ret = alloc_page_table(&paddr, page_size_shift);
        if (ret) {
            TRACEF("failed to allocate page table\n");
            return NULL;
        }
        vaddr = paddr_to_kvaddr(paddr);
        LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
        /* clear every descriptor in the new table, then make the zeroed
         * contents observable before publishing the table descriptor that
         * points at it (dmb ishst orders the stores for the MMU walker) */
        memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
        __asm__ volatile("dmb ishst" ::: "memory");
        pte = paddr | MMU_PTE_L012_DESCRIPTOR_TABLE;
        page_table[index] = pte;
        LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
        return vaddr;

    case MMU_PTE_L012_DESCRIPTOR_TABLE:
        /* existing next-level table; translate its output address */
        paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
        LTRACEF("found page table 0x%lx\n", paddr);
        return paddr_to_kvaddr(paddr);

    case MMU_PTE_L012_DESCRIPTOR_BLOCK:
        /* slot is a block mapping, not a table; caller must handle */
        return NULL;

    default:
        PANIC_UNIMPLEMENTED;
    }
}
261
262static bool page_table_is_clear(pte_t *page_table, uint page_size_shift)
263{
264 int i;
265 int count = 1U << (page_size_shift - 3);
266 pte_t pte;
267
268 for (i = 0; i < count; i++) {
269 pte = page_table[i];
270 if (pte != MMU_PTE_DESCRIPTOR_INVALID) {
271 LTRACEF("page_table at %p still in use, index %d is 0x%llx\n",
272 page_table, i, pte);
273 return false;
274 }
275 }
276
277 LTRACEF("page table at %p is clear\n", page_table);
278 return true;
279}
280
/*
 * Recursively clear the translation table entries covering
 * [vaddr_rel, vaddr_rel + size) at the level indexed by index_shift,
 * issuing TLB invalidates for each removed leaf entry and freeing
 * next-level tables that become empty.
 *
 * vaddr is the absolute virtual address (used only for TLBI and tracing);
 * vaddr_rel is the offset relative to the range covered by page_table.
 */
static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
                               size_t size,
                               uint index_shift, uint page_size_shift,
                               pte_t *page_table, uint asid)
{
    pte_t *next_page_table;
    vaddr_t index;
    size_t chunk_size;
    vaddr_t vaddr_rem;
    vaddr_t block_size;
    vaddr_t block_mask;
    pte_t pte;
    paddr_t page_table_paddr;

    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n",
            vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table);

    while (size) {
        /* how much of the request falls within the current entry */
        block_size = 1UL << index_shift;
        block_mask = block_size - 1;
        vaddr_rem = vaddr_rel & block_mask;
        chunk_size = MIN(size, block_size - vaddr_rem);
        index = vaddr_rel >> index_shift;

        pte = page_table[index];

        if (index_shift > page_size_shift &&
            (pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) {
            /* descend into the next-level table for this chunk */
            page_table_paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
            next_page_table = paddr_to_kvaddr(page_table_paddr);
            arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size,
                               index_shift - (page_size_shift - 3),
                               page_size_shift,
                               next_page_table, asid);
            /* if the whole block was covered, or nothing is left in the
             * lower table, drop the table entry and free the table;
             * dmb ishst orders the descriptor clear before the free */
            if (chunk_size == block_size ||
                page_table_is_clear(next_page_table, page_size_shift)) {
                LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
                page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
                __asm__ volatile("dmb ishst" ::: "memory");
                free_page_table(next_page_table, page_table_paddr, page_size_shift);
            }
        } else if (pte) {
            /* leaf (block or page) entry: clear it and invalidate the TLB
             * for this VA; by-ASID invalidate unless it's a global mapping */
            LTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
            page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
            /* NOTE(review): CF is a project macro -- presumably a fence
             * committing the PTE clear before the TLB invalidate below;
             * confirm against its definition */
            CF;
            if (asid == MMU_ARM64_GLOBAL_ASID)
                ARM64_TLBI(vaae1is, vaddr >> 12);
            else
                ARM64_TLBI(vae1is, vaddr >> 12 | (vaddr_t)asid << 48);
        } else {
            LTRACEF("pte %p[0x%lx] already clear\n", page_table, index);
        }
        vaddr += chunk_size;
        vaddr_rel += chunk_size;
        size -= chunk_size;
    }
}
338
/*
 * Recursively install mappings for [vaddr_rel_in, vaddr_rel_in + size_in)
 * -> paddr_in at the level indexed by index_shift, using block entries
 * where alignment and size allow and descending into (possibly newly
 * allocated) next-level tables otherwise.
 *
 * Returns 0 on success.  On any failure, unmaps everything mapped so far
 * by this call and returns ERR_GENERIC (or ERR_INVALID_ARGS for unaligned
 * input).
 */
static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
                            paddr_t paddr_in,
                            size_t size_in, pte_t attrs,
                            uint index_shift, uint page_size_shift,
                            pte_t *page_table, uint asid)
{
    int ret;
    pte_t *next_page_table;
    vaddr_t index;
    /* working copies; the _in originals are kept for error rollback */
    vaddr_t vaddr = vaddr_in;
    vaddr_t vaddr_rel = vaddr_rel_in;
    paddr_t paddr = paddr_in;
    size_t size = size_in;
    size_t chunk_size;
    vaddr_t vaddr_rem;
    vaddr_t block_size;
    vaddr_t block_mask;
    pte_t pte;

    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, index shift %d, page_size_shift %d, page_table %p\n",
            vaddr, vaddr_rel, paddr, size, attrs,
            index_shift, page_size_shift, page_table);

    /* virtual address, physical address and size must all be page aligned */
    if ((vaddr_rel | paddr | size) & ((1UL << page_size_shift) - 1)) {
        TRACEF("not page aligned\n");
        return ERR_INVALID_ARGS;
    }

    while (size) {
        /* how much of the request falls within the current entry */
        block_size = 1UL << index_shift;
        block_mask = block_size - 1;
        vaddr_rem = vaddr_rel & block_mask;
        chunk_size = MIN(size, block_size - vaddr_rem);
        index = vaddr_rel >> index_shift;

        /* use a block entry only when the chunk exactly covers a whole,
         * properly aligned block at a level that permits block mappings;
         * otherwise descend into the next-level table */
        if (((vaddr_rel | paddr) & block_mask) ||
            (chunk_size != block_size) ||
            (index_shift > MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT)) {
            next_page_table = arm64_mmu_get_page_table(index, page_size_shift,
                                                       page_table);
            if (!next_page_table)
                goto err;

            ret = arm64_mmu_map_pt(vaddr, vaddr_rem, paddr, chunk_size, attrs,
                                   index_shift - (page_size_shift - 3),
                                   page_size_shift, next_page_table, asid);
            if (ret)
                goto err;
        } else {
            pte = page_table[index];
            /* refuse to silently overwrite an existing mapping */
            if (pte) {
                TRACEF("page table entry already in use, index 0x%lx, 0x%llx\n",
                       index, pte);
                goto err;
            }

            pte = paddr | attrs;
            if (index_shift > page_size_shift)
                pte |= MMU_PTE_L012_DESCRIPTOR_BLOCK;
            else
                pte |= MMU_PTE_L3_DESCRIPTOR_PAGE;

            LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
            page_table[index] = pte;
        }
        vaddr += chunk_size;
        vaddr_rel += chunk_size;
        paddr += chunk_size;
        size -= chunk_size;
    }

    return 0;

err:
    /* roll back whatever portion of the request was already mapped */
    arm64_mmu_unmap_pt(vaddr_in, vaddr_rel_in, size_in - size,
                       index_shift, page_size_shift, page_table, asid);
    DSB;
    return ERR_GENERIC;
}
418
419int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
420 vaddr_t vaddr_base, uint top_size_shift,
421 uint top_index_shift, uint page_size_shift,
422 pte_t *top_page_table, uint asid)
423{
424 int ret;
425 vaddr_t vaddr_rel = vaddr - vaddr_base;
426 vaddr_t vaddr_rel_max = 1UL << top_size_shift;
427
428 LTRACEF("vaddr 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, asid 0x%x\n",
429 vaddr, paddr, size, attrs, asid);
430
431 if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
432 TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
433 vaddr, size, vaddr_base, vaddr_rel_max);
434 return ERR_INVALID_ARGS;
435 }
436
437 if (!top_page_table) {
438 TRACEF("page table is NULL\n");
439 return ERR_INVALID_ARGS;
440 }
441
442 ret = arm64_mmu_map_pt(vaddr, vaddr_rel, paddr, size, attrs,
443 top_index_shift, page_size_shift, top_page_table, asid);
444 DSB;
445 return ret;
446}
447
448int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
449 vaddr_t vaddr_base, uint top_size_shift,
450 uint top_index_shift, uint page_size_shift,
451 pte_t *top_page_table, uint asid)
452{
453 vaddr_t vaddr_rel = vaddr - vaddr_base;
454 vaddr_t vaddr_rel_max = 1UL << top_size_shift;
455
456 LTRACEF("vaddr 0x%lx, size 0x%lx, asid 0x%x\n", vaddr, size, asid);
457
458 if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
459 TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
460 vaddr, size, vaddr_base, vaddr_rel_max);
461 return ERR_INVALID_ARGS;
462 }
463
464 if (!top_page_table) {
465 TRACEF("page table is NULL\n");
466 return ERR_INVALID_ARGS;
467 }
468
469 arm64_mmu_unmap_pt(vaddr, vaddr_rel, size,
470 top_index_shift, page_size_shift, top_page_table, asid);
471 DSB;
472 return 0;
473}
474
475int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
476{
477 return arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE,
478 mmu_flags_to_pte_attr(flags),
479 ~0UL << MMU_KERNEL_SIZE_SHIFT, MMU_KERNEL_SIZE_SHIFT,
480 MMU_KERNEL_TOP_SHIFT, MMU_KERNEL_PAGE_SIZE_SHIFT,
481 arm64_kernel_translation_table, MMU_ARM64_GLOBAL_ASID);
482}
483
484int arch_mmu_unmap(vaddr_t vaddr, uint count)
485{
486 return arm64_mmu_unmap(vaddr, count * PAGE_SIZE,
487 ~0UL << MMU_KERNEL_SIZE_SHIFT, MMU_KERNEL_SIZE_SHIFT,
488 MMU_KERNEL_TOP_SHIFT, MMU_KERNEL_PAGE_SIZE_SHIFT,
489 arm64_kernel_translation_table,
490 MMU_ARM64_GLOBAL_ASID);
491}