/*
2 * Copyright (c) 2014 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include <trace.h>
24#include <assert.h>
25#include <err.h>
26#include <string.h>
27#include <lib/console.h>
28#include <kernel/vm.h>
29#include <kernel/mutex.h>
30#include "vm_priv.h"
31
32#define LOCAL_TRACE 0
33
/* global list of every address space in the system (kernel + user) */
static struct list_node aspace_list = LIST_INITIAL_VALUE(aspace_list);
/* protects aspace_list and every aspace's region_list */
static mutex_t vmm_lock = MUTEX_INITIAL_VALUE(vmm_lock);

/* the singleton kernel address space, set up by vmm_init() */
vmm_aspace_t _kernel_aspace;

static void dump_aspace(const vmm_aspace_t *a);
static void dump_region(const vmm_region_t *r);
41
42void vmm_init(void)
43{
44 /* initialize the kernel address space */
45 strlcpy(_kernel_aspace.name, "kernel", sizeof(_kernel_aspace.name));
46 _kernel_aspace.base = KERNEL_ASPACE_BASE,
47 _kernel_aspace.size = KERNEL_ASPACE_SIZE,
48 _kernel_aspace.flags = VMM_FLAG_ASPACE_KERNEL;
49 list_initialize(&_kernel_aspace.region_list);
50
51 list_add_head(&aspace_list, &_kernel_aspace.node);
52}
53
54static inline bool is_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr)
55{
56 DEBUG_ASSERT(aspace);
57
58 return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
59}
60
61static bool is_region_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size)
62{
63 DEBUG_ASSERT(aspace);
64
65 /* is the starting address within the address space*/
66 if (!is_inside_aspace(aspace, vaddr))
67 return false;
68
69 if (size == 0)
70 return true;
71
72 /* see if the size is enough to wrap the integer */
73 if (vaddr + size - 1 < vaddr)
74 return false;
75
76 /* test to see if the end address is within the address space's */
77 if (vaddr + size - 1 > aspace->base + aspace->size - 1)
78 return false;
79
80 return true;
81}
82
83static size_t trim_to_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size)
84{
85 DEBUG_ASSERT(aspace);
86 DEBUG_ASSERT(is_inside_aspace(aspace, vaddr));
87
88 if (size == 0)
89 return size;
90
91 size_t offset = vaddr - aspace->base;
92
93 //LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
94 // vaddr, size, offset, aspace->base, aspace->size);
95
96 if (offset + size < offset)
97 size = ULONG_MAX - offset - 1;
98
99 //LTRACEF("size now 0x%zx\n", size);
100
101 if (offset + size >= aspace->size - 1)
102 size = aspace->size - offset;
103
104 //LTRACEF("size now 0x%zx\n", size);
105
106 return size;
107}
108
109static vmm_region_t *alloc_region_struct(const char *name, vaddr_t base, size_t size, uint flags, uint arch_mmu_flags)
110{
111 DEBUG_ASSERT(name);
112
113 vmm_region_t *r = malloc(sizeof(vmm_region_t));
114 if (!r)
115 return NULL;
116
117 strlcpy(r->name, name, sizeof(r->name));
118 r->base = base;
119 r->size = size;
120 r->flags = flags;
121 r->arch_mmu_flags = arch_mmu_flags;
122 list_initialize(&r->page_list);
123
124 return r;
125}
126
/* add a region to the appropriate spot in the address space list,
 * testing to see if there's a space
 *
 * The region list is kept sorted by base address. Returns NO_ERROR on
 * insertion, ERR_OUT_OF_RANGE when the region doesn't fit inside the
 * aspace, and ERR_NO_MEMORY when the range overlaps an existing region
 * (no gap found). Every caller in this file holds vmm_lock. */
static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(r);

    LTRACEF("aspace %p base 0x%lx size 0x%zx r %p base 0x%lx size 0x%zx\n",
            aspace, aspace->base, aspace->size, r, r->base, r->size);

    /* only try if the region will at least fit in the address space */
    if (r->size == 0 || !is_region_inside_aspace(aspace, r->base, r->size)) {
        LTRACEF("region was out of range\n");
        return ERR_OUT_OF_RANGE;
    }

    /* last byte of the candidate region */
    vaddr_t r_end = r->base + r->size - 1;

    /* does it fit in front */
    vmm_region_t *last;
    last = list_peek_head_type(&aspace->region_list, vmm_region_t, node);
    if (!last || r_end < last->base) {
        /* empty list or not empty and fits before the first element */
        list_add_head(&aspace->region_list, &r->node);
        return NO_ERROR;
    }

    /* walk the list, finding the right spot to put it */
    list_for_every_entry(&aspace->region_list, last, vmm_region_t, node) {
        /* does it go after last? */
        if (r->base > last->base + last->size - 1) {
            /* get the next element in the list */
            vmm_region_t *next = list_next_type(&aspace->region_list, &last->node, vmm_region_t, node);
            if (!next || (r_end < next->base)) {
                /* end of the list or next exists and it goes between them */
                list_add_after(&last->node, &r->node);
                return NO_ERROR;
            }
        }
    }

    /* fell off the end: the range overlaps something already mapped */
    LTRACEF("couldn't find spot\n");
    return ERR_NO_MEMORY;
}
171
172/*
173 * Try to pick the spot within specified gap
174 *
175 * Arch can override this to impose it's own restrictions.
176 */
177__WEAK vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_arch_mmu_flags,
178 vaddr_t end, uint next_region_arch_mmu_flags,
179 vaddr_t align, size_t size, uint arch_mmu_flags)
180{
181 /* just align it by default */
182 return ALIGN(base, align);
183}
184
/*
 * Returns true if the caller has to stop search
 *
 * Examines the gap between regions 'prev' and 'next' (either may be NULL,
 * standing for the start/end of the address space). On a fit, *pva receives
 * the chosen spot and true is returned; when the end of the address space
 * is reached without a fit, *pva is set to (vaddr_t)-1 and true is also
 * returned, so callers must check *pva. False means "keep searching".
 */
static inline bool check_gap(vmm_aspace_t *aspace,
                             vmm_region_t *prev, vmm_region_t *next,
                             vaddr_t *pva, vaddr_t align, size_t size,
                             uint arch_mmu_flags)
{
    vaddr_t gap_beg; /* first byte of a gap */
    vaddr_t gap_end; /* last byte of a gap */

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(pva);

    /* the gap starts right after prev, or at the base of the aspace */
    if (prev)
        gap_beg = prev->base + prev->size;
    else
        gap_beg = aspace->base;

    if (next) {
        if (gap_beg == next->base)
            goto next_gap; /* no gap between regions */
        gap_end = next->base - 1;
    } else {
        if (gap_beg == (aspace->base + aspace->size))
            goto not_found; /* no gap at the end of address space. Stop search */
        gap_end = aspace->base + aspace->size - 1;
    }

    /* let the arch pick (and possibly further constrain) the spot */
    *pva = arch_mmu_pick_spot(gap_beg, prev ? prev->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                              gap_end, next ? next->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                              align, size, arch_mmu_flags);
    if (*pva < gap_beg)
        goto not_found; /* address wrapped around */

    if (*pva < gap_end && ((gap_end - *pva + 1) >= size)) {
        /* we have enough room */
        return true; /* found spot, stop search */
    }

next_gap:
    return false; /* continue search */

not_found:
    *pva = -1;
    return true; /* not_found: stop search */
}
232
/*
 * Find a free virtual range of 'size' bytes in 'aspace', aligned to
 * 2^align_pow2 (raised to at least page alignment). Returns the chosen
 * address, or (vaddr_t)-1 when no gap is large enough. When 'before' is
 * non-NULL it receives the list node after which the new region should be
 * inserted — the list head itself when the spot precedes all regions.
 * Every caller in this file holds vmm_lock.
 */
static vaddr_t alloc_spot(vmm_aspace_t *aspace, size_t size, uint8_t align_pow2,
                          uint arch_mmu_flags, struct list_node **before)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size));

    LTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2);

    /* never align to less than a page */
    if (align_pow2 < PAGE_SIZE_SHIFT)
        align_pow2 = PAGE_SIZE_SHIFT;
    vaddr_t align = 1UL << align_pow2;

    vaddr_t spot;
    vmm_region_t *r = NULL;

    /* try to pick spot at the beginning of address space */
    if (check_gap(aspace, NULL,
                  list_peek_head_type(&aspace->region_list, vmm_region_t, node),
                  &spot, align, size, arch_mmu_flags))
        goto done;

    /* search the middle of the list */
    list_for_every_entry(&aspace->region_list, r, vmm_region_t, node) {
        if (check_gap(aspace, r,
                      list_next_type(&aspace->region_list, &r->node, vmm_region_t, node),
                      &spot, align, size, arch_mmu_flags))
            goto done;
    }

    /* couldn't find anything */
    return -1;

done:
    /* r is still NULL when the very first gap matched, meaning "insert at
     * the list head". Note check_gap may have stopped the search with
     * spot == (vaddr_t)-1; callers test the return before using *before. */
    if (before)
        *before = r ? &r->node : &aspace->region_list;
    return spot;
}
270
/* allocate a region structure and stick it in the address space
 *
 * With VMM_FLAG_VALLOC_SPECIFIC the region is placed exactly at 'vaddr'
 * (failing if the range doesn't fit or overlaps); otherwise alloc_spot()
 * picks a free range and 'vaddr' is ignored as a hint. Returns the region
 * linked into the aspace, or NULL on failure. Every caller in this file
 * holds vmm_lock across this call. */
static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t size,
                                  vaddr_t vaddr, uint8_t align_pow2,
                                  uint vmm_flags, uint region_flags, uint arch_mmu_flags)
{
    /* make a region struct for it and stick it in the list */
    vmm_region_t *r = alloc_region_struct(name, vaddr, size, region_flags, arch_mmu_flags);
    if (!r)
        return NULL;

    /* if they ask us for a specific spot, put it there */
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        /* stick it in the list, checking to see if it fits */
        if (add_region_to_aspace(aspace, r) < 0) {
            /* didn't fit */
            free(r);
            return NULL;
        }
    } else {
        /* allocate a virtual slot for it */
        struct list_node *before = NULL;

        vaddr = alloc_spot(aspace, size, align_pow2, arch_mmu_flags, &before);
        LTRACEF("alloc_spot returns 0x%lx, before %p\n", vaddr, before);

        if (vaddr == (vaddr_t)-1) {
            LTRACEF("failed to find spot\n");
            free(r);
            return NULL;
        }

        DEBUG_ASSERT(before != NULL);

        /* the struct was built with the caller's placeholder vaddr;
         * overwrite it with the spot we actually found */
        r->base = (vaddr_t)vaddr;

        /* add it to the region list */
        list_add_after(before, &r->node);
    }

    return r;
}
312
/*
 * Reserve an already-mapped range of the address space so that later
 * allocations cannot land on it. The range's current mmu flags are queried
 * and recorded on the region. Returns NO_ERROR (including for size == 0),
 * ERR_INVALID_ARGS, ERR_OUT_OF_RANGE, or ERR_NO_MEMORY.
 */
status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr)
{
    LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr);

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));

    if (!name)
        name = "";

    if (size == 0)
        return NO_ERROR;
    /* runtime mirror of the asserts above, for release builds */
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size))
        return ERR_INVALID_ARGS;

    if (!is_inside_aspace(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    /* trim the size */
    size = trim_to_aspace(aspace, vaddr, size);

    mutex_acquire(&vmm_lock);

    /* lookup how it's already mapped */
    uint arch_mmu_flags = 0;
    /* NOTE(review): return value ignored; arch_mmu_flags stays 0 when the
     * query fails — confirm that is the intended default */
    arch_mmu_query(vaddr, NULL, &arch_mmu_flags);

    /* build a new region structure */
    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, 0, VMM_FLAG_VALLOC_SPECIFIC, VMM_REGION_FLAG_RESERVED, arch_mmu_flags);

    mutex_release(&vmm_lock);
    return r ? NO_ERROR : ERR_NO_MEMORY;
}
346
347status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
348{
349 status_t ret;
350
351 LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n",
352 aspace, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);
353
354 DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
355 DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
356
357 if (!name)
358 name = "";
359
360 if (size == 0)
361 return NO_ERROR;
362 if (!IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size))
363 return ERR_INVALID_ARGS;
364
365 vaddr_t vaddr = 0;
366
367 /* if they're asking for a specific spot, copy the address */
368 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
369 /* can't ask for a specific spot and then not provide one */
370 if (!ptr) {
371 return ERR_INVALID_ARGS;
372 }
373 vaddr = (vaddr_t)*ptr;
374 }
375
376 mutex_acquire(&vmm_lock);
377
378 /* allocate a region and put it in the aspace list */
379 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_log2, vmm_flags,
380 VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
381 if (!r) {
382 ret = ERR_NO_MEMORY;
383 goto err_alloc_region;
384 }
385
386 /* return the vaddr if requested */
387 if (ptr)
388 *ptr = (void *)r->base;
389
390 /* map all of the pages */
391 int err = arch_mmu_map(r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
392 LTRACEF("arch_mmu_map returns %d\n", err);
393
394 ret = NO_ERROR;
395
396err_alloc_region:
397 mutex_release(&vmm_lock);
398 return ret;
399}
400
401status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr,
402 uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags)
403{
404 status_t err = NO_ERROR;
405
406 LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
407 aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);
408
409 size = ROUNDUP(size, PAGE_SIZE);
410 if (size == 0)
411 return ERR_INVALID_ARGS;
412
413 if (!name)
414 name = "";
415
416 vaddr_t vaddr = 0;
417
418 /* if they're asking for a specific spot, copy the address */
419 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
420 /* can't ask for a specific spot and then not provide one */
421 if (!ptr) {
422 err = ERR_INVALID_ARGS;
423 goto err;
424 }
425 vaddr = (vaddr_t)*ptr;
426 }
427
428 /* allocate physical memory up front, in case it cant be satisfied */
429 struct list_node page_list;
430 list_initialize(&page_list);
431
432 paddr_t pa = 0;
433 /* allocate a run of physical pages */
434 size_t count = pmm_alloc_contiguous(size / PAGE_SIZE, align_pow2, &pa, &page_list);
435 if (count < size / PAGE_SIZE) {
436 DEBUG_ASSERT(count == 0); /* check that the pmm didn't allocate a partial run */
437 err = ERR_NO_MEMORY;
438 goto err;
439 }
440
441 mutex_acquire(&vmm_lock);
442
443 /* allocate a region and put it in the aspace list */
444 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags,
445 VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
446 if (!r) {
447 err = ERR_NO_MEMORY;
448 goto err1;
449 }
450
451 /* return the vaddr if requested */
452 if (ptr)
453 *ptr = (void *)r->base;
454
455 /* map all of the pages */
456 arch_mmu_map(r->base, pa, size / PAGE_SIZE, arch_mmu_flags);
457 // XXX deal with error mapping here
458
459 vm_page_t *p;
460 while ((p = list_remove_head_type(&page_list, vm_page_t, node))) {
461 list_add_tail(&r->page_list, &p->node);
462 }
463
464 mutex_release(&vmm_lock);
465 return NO_ERROR;
466
467err1:
468 mutex_release(&vmm_lock);
469 pmm_free(&page_list);
470err:
471 return err;
472}
473
474status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr,
475 uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags)
476{
477 status_t err = NO_ERROR;
478
479 LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
480 aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);
481
482 size = ROUNDUP(size, PAGE_SIZE);
483 if (size == 0)
484 return ERR_INVALID_ARGS;
485
486 if (!name)
487 name = "";
488
489 vaddr_t vaddr = 0;
490
491 /* if they're asking for a specific spot, copy the address */
492 if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
493 /* can't ask for a specific spot and then not provide one */
494 if (!ptr) {
495 err = ERR_INVALID_ARGS;
496 goto err;
497 }
498 vaddr = (vaddr_t)*ptr;
499 }
500
501 /* allocate physical memory up front, in case it cant be satisfied */
502
503 /* allocate a random pile of pages */
504 struct list_node page_list;
505 list_initialize(&page_list);
506
507 size_t count = pmm_alloc_pages(size / PAGE_SIZE, &page_list);
508 DEBUG_ASSERT(count <= size);
509 if (count < size / PAGE_SIZE) {
510 LTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count);
511 pmm_free(&page_list);
512 err = ERR_NO_MEMORY;
513 goto err;
514 }
515
516 mutex_acquire(&vmm_lock);
517
518 /* allocate a region and put it in the aspace list */
519 vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags,
520 VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
521 if (!r) {
522 err = ERR_NO_MEMORY;
523 goto err1;
524 }
525
526 /* return the vaddr if requested */
527 if (ptr)
528 *ptr = (void *)r->base;
529
530 /* map all of the pages */
531 /* XXX use smarter algorithm that tries to build runs */
532 vm_page_t *p;
533 vaddr_t va = r->base;
534 DEBUG_ASSERT(IS_PAGE_ALIGNED(va));
535 while ((p = list_remove_head_type(&page_list, vm_page_t, node))) {
536 DEBUG_ASSERT(va <= r->base + r->size - 1);
537
538 paddr_t pa = page_to_address(p);
539 DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));
540
541 arch_mmu_map(va, pa, 1, arch_mmu_flags);
542 // XXX deal with error mapping here
543
544 list_add_tail(&r->page_list, &p->node);
545
546 va += PAGE_SIZE;
547 }
548
549 mutex_release(&vmm_lock);
550 return NO_ERROR;
551
552err1:
553 mutex_release(&vmm_lock);
554 pmm_free(&page_list);
555err:
556 return err;
557}
558
559static vmm_region_t *vmm_find_region(const vmm_aspace_t *aspace, vaddr_t vaddr)
560{
561 vmm_region_t *r;
562
563 DEBUG_ASSERT(aspace);
564
565 if (!aspace)
566 return NULL;
567
568 /* search the region list */
569 list_for_every_entry(&aspace->region_list, r, vmm_region_t, node) {
570 if ((vaddr >= r->base) && (vaddr <= r->base + r->size - 1))
571 return r;
572 }
573
574 return NULL;
575}
576
577status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t vaddr)
578{
579 DEBUG_ASSERT(aspace);
580
581 mutex_acquire(&vmm_lock);
582
583 vmm_region_t *r = vmm_find_region (aspace, vaddr);
584 if (!r) {
585 mutex_release(&vmm_lock);
586 return ERR_NOT_FOUND;
587 }
588
589 /* remove it from aspace */
590 list_delete(&r->node);
591
592 /* unmap it */
593 arch_mmu_unmap(r->base, r->size / PAGE_SIZE);
594
595 mutex_release(&vmm_lock);
596
597 /* return physical pages if any */
598 pmm_free(&r->page_list);
599
600 /* free it */
601 free(r);
602
603 return NO_ERROR;
604}
605
606status_t vmm_create_aspace(vmm_aspace_t **_aspace, const char *name, uint flags)
607{
608 DEBUG_ASSERT(_aspace);
609
610 vmm_aspace_t *aspace = malloc(sizeof(vmm_aspace_t));
611 if (!aspace)
612 return ERR_NO_MEMORY;
613
614 if (name)
615 strlcpy(aspace->name, name, sizeof(aspace->name));
616 else
617 strlcpy(aspace->name, "unnamed", sizeof(aspace->name));
618
619 if (flags & VMM_FLAG_ASPACE_KERNEL) {
620 aspace->base = KERNEL_ASPACE_BASE;
621 aspace->size = KERNEL_ASPACE_SIZE;
622 } else {
623 aspace->base = USER_ASPACE_BASE;
624 aspace->size = USER_ASPACE_SIZE;
625 }
626
627 list_clear_node(&aspace->node);
628 list_initialize(&aspace->region_list);
629
630 mutex_acquire(&vmm_lock);
631 list_add_head(&aspace_list, &aspace->node);
632 mutex_release(&vmm_lock);
633
634 *_aspace = aspace;
635
636 return NO_ERROR;
637}
638
/*
 * Tear down an entire address space: unlink it from the global aspace list,
 * unmap every region while holding the vmm lock, then — with the lock
 * dropped — return all backing pages to the pmm and free the structures.
 * Returns ERR_INVALID_ARGS if the aspace was not on the global list.
 */
status_t vmm_free_aspace(vmm_aspace_t *aspace)
{
    DEBUG_ASSERT(aspace);

    /* pop it out of the global aspace list */
    mutex_acquire(&vmm_lock);
    if (!list_in_list(&aspace->node)) {
        mutex_release(&vmm_lock);
        return ERR_INVALID_ARGS;
    }
    list_delete(&aspace->node);

    /* free all of the regions */
    struct list_node region_list = LIST_INITIAL_VALUE(region_list);

    vmm_region_t *r;
    while ((r = list_remove_head_type(&aspace->region_list, vmm_region_t, node))) {
        /* add it to our temporary list so the pages can be freed after
         * the lock is released */
        list_add_tail(&region_list, &r->node);

        /* unmap it */
        arch_mmu_unmap(r->base, r->size / PAGE_SIZE);
    }
    mutex_release(&vmm_lock);

    /* without the vmm lock held, free all of the pmm pages and the structure */
    while ((r = list_remove_head_type(&region_list, vmm_region_t, node))) {
        /* return physical pages if any */
        pmm_free(&r->page_list);

        /* free it */
        free(r);
    }

    /* free the aspace */
    free(aspace);

    return NO_ERROR;
}
678
679static void dump_region(const vmm_region_t *r)
680{
681 DEBUG_ASSERT(r);
682
683 printf("\tregion %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x mmu_flags 0x%x\n",
684 r, r->name, r->base, r->base + r->size - 1, r->size, r->flags, r->arch_mmu_flags);
685}
686
687static void dump_aspace(const vmm_aspace_t *a)
688{
689 DEBUG_ASSERT(a);
690
691 printf("aspace %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x\n",
692 a, a->name, a->base, a->base + a->size - 1, a->size, a->flags);
693
694 printf("regions:\n");
695 vmm_region_t *r;
696 list_for_every_entry(&a->region_list, r, vmm_region_t, node) {
697 dump_region(r);
698 }
699}
700
701static int cmd_vmm(int argc, const cmd_args *argv)
702{
703 if (argc < 2) {
704notenoughargs:
705 printf("not enough arguments\n");
706usage:
707 printf("usage:\n");
708 printf("%s aspaces\n", argv[0].str);
709 printf("%s alloc <size> <align_pow2>\n", argv[0].str);
710 printf("%s alloc_physical <paddr> <size> <align_pow2>\n", argv[0].str);
711 printf("%s alloc_contig <size> <align_pow2>\n", argv[0].str);
712 printf("%s create_aspace\n", argv[0].str);
713 return ERR_GENERIC;
714 }
715
716 if (!strcmp(argv[1].str, "aspaces")) {
717 vmm_aspace_t *a;
718 list_for_every_entry(&aspace_list, a, vmm_aspace_t, node) {
719 dump_aspace(a);
720 }
721 } else if (!strcmp(argv[1].str, "alloc")) {
722 if (argc < 4) goto notenoughargs;
723
724 void *ptr = (void *)0x99;
725 status_t err = vmm_alloc(vmm_get_kernel_aspace(), "alloc test", argv[2].u, &ptr, argv[3].u, 0, 0);
726 printf("vmm_alloc returns %d, ptr %p\n", err, ptr);
727 } else if (!strcmp(argv[1].str, "alloc_physical")) {
728 if (argc < 4) goto notenoughargs;
729
730 void *ptr = (void *)0x99;
731 status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "physical test", argv[3].u, &ptr, argv[4].u, argv[2].u, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
732 printf("vmm_alloc_physical returns %d, ptr %p\n", err, ptr);
733 } else if (!strcmp(argv[1].str, "alloc_contig")) {
734 if (argc < 4) goto notenoughargs;
735
736 void *ptr = (void *)0x99;
737 status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "contig test", argv[2].u, &ptr, argv[3].u, 0, 0);
738 printf("vmm_alloc_contig returns %d, ptr %p\n", err, ptr);
739 } else if (!strcmp(argv[1].str, "create_aspace")) {
740 vmm_aspace_t *aspace;
741 status_t err = vmm_create_aspace(&aspace, "test", 0);
742 printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
743 } else {
744 printf("unknown command\n");
745 goto usage;
746 }
747
748 return NO_ERROR;
749}
750
/* register the 'vmm' console command (debug builds only) */
STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vmm", "virtual memory manager", &cmd_vmm)
#endif
STATIC_COMMAND_END(vmm);
756