blob: 3bcdc778ad7bbc55c10c3965b18e7427862a156c [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (c) 2014 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include <kernel/vm.h>
24#include "vm_priv.h"
25
26#include <trace.h>
27#include <assert.h>
28#include <list.h>
29#include <stdlib.h>
30#include <err.h>
31#include <string.h>
32#include <pow2.h>
33#include <lib/console.h>
34#include <kernel/mutex.h>
35
36#define LOCAL_TRACE 0
37
38static struct list_node arena_list = LIST_INITIAL_VALUE(arena_list);
39static mutex_t lock = MUTEX_INITIAL_VALUE(lock);
40
/* true if |page| points into |arena|'s page_array */
#define PAGE_BELONGS_TO_ARENA(page, arena) \
    (((uintptr_t)(page) >= (uintptr_t)(arena)->page_array) && \
     ((uintptr_t)(page) < ((uintptr_t)(arena)->page_array + (arena)->size / PAGE_SIZE * sizeof(vm_page_t))))

/* physical address of the page that |page|'s page_array entry tracks.
 * Fixed to use the |arena| parameter (the original referenced a variable
 * named 'a', relying on every caller using that name), to parenthesize
 * the arguments and the full expansion, and to drop the stray trailing
 * semicolon so the macro composes like an expression. */
#define PAGE_ADDRESS_FROM_ARENA(page, arena) \
    ((paddr_t)(((uintptr_t)(page) - (uintptr_t)(arena)->page_array) / sizeof(vm_page_t)) * PAGE_SIZE + (arena)->base)

/* true if physical address |address| falls inside |arena| */
#define ADDRESS_IN_ARENA(address, arena) \
    ((address) >= (arena)->base && (address) <= (arena)->base + (arena)->size - 1)
50
51static inline bool page_is_free(const vm_page_t *page)
52{
53 DEBUG_ASSERT(page);
54
55 return !(page->flags & VM_PAGE_FLAG_NONFREE);
56}
57
58paddr_t page_to_address(const vm_page_t *page)
59{
60 DEBUG_ASSERT(page);
61
62 pmm_arena_t *a;
63 list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
64 if (PAGE_BELONGS_TO_ARENA(page, a)) {
65 return PAGE_ADDRESS_FROM_ARENA(page, a);
66 }
67 }
68 return -1;
69}
70
71vm_page_t *address_to_page(paddr_t addr)
72{
73 pmm_arena_t *a;
74 list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
75 if (addr >= a->base && addr <= a->base + a->size - 1) {
76 size_t index = (addr - a->base) / PAGE_SIZE;
77 return &a->page_array[index];
78 }
79 }
80 return NULL;
81}
82
83status_t pmm_add_arena(pmm_arena_t *arena)
84{
85 LTRACEF("arena %p name '%s' base 0x%lx size 0x%zx\n", arena, arena->name, arena->base, arena->size);
86
87 DEBUG_ASSERT(arena);
88 DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base));
89 DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size));
90 DEBUG_ASSERT(arena->size > 0);
91
92 /* walk the arena list and add arena based on priority order */
93 pmm_arena_t *a;
94 list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
95 if (a->priority > arena->priority) {
96 list_add_before(&a->node, &arena->node);
97 goto done_add;
98 }
99 }
100
101 /* walked off the end, add it to the end of the list */
102 list_add_tail(&arena_list, &arena->node);
103
104done_add:
105
106 /* zero out some of the structure */
107 arena->free_count = 0;
108 list_initialize(&arena->free_list);
109
110 /* allocate an array of pages to back this one */
111 size_t page_count = arena->size / PAGE_SIZE;
112 arena->page_array = boot_alloc_mem(page_count * sizeof(vm_page_t));
113
114 /* initialize all of the pages */
115 memset(arena->page_array, 0, page_count * sizeof(vm_page_t));
116
117 /* add them to the free list */
118 for (size_t i = 0; i < page_count; i++) {
119 vm_page_t *p = &arena->page_array[i];
120
121 list_add_tail(&arena->free_list, &p->node);
122
123 arena->free_count++;
124 }
125
126 return NO_ERROR;
127}
128
129size_t pmm_alloc_pages(uint count, struct list_node *list)
130{
131 LTRACEF("count %u\n", count);
132
133 uint allocated = 0;
134 if (count == 0)
135 return 0;
136
137 mutex_acquire(&lock);
138
139 /* walk the arenas in order, allocating as many pages as we can from each */
140 pmm_arena_t *a;
141 list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
142 while (allocated < count) {
143 vm_page_t *page = list_remove_head_type(&a->free_list, vm_page_t, node);
144 if (!page)
145 goto done;
146
147 a->free_count--;
148
149 page->flags |= VM_PAGE_FLAG_NONFREE;
150 list_add_tail(list, &page->node);
151
152 allocated++;
153 }
154 }
155
156done:
157 mutex_release(&lock);
158 return allocated;
159}
160
/*
 * Allocate up to |count| specific physical pages starting at |address|
 * (rounded down to a page boundary), appending each allocated vm_page_t
 * to |list|.  The run stops early when it reaches a page that is already
 * allocated or walks off the end of the containing arena.  Returns the
 * number of pages actually allocated.
 */
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list)
{
    LTRACEF("address 0x%lx, count %u\n", address, count);

    uint allocated = 0;
    if (count == 0)
        return 0;

    address = ROUNDDOWN(address, PAGE_SIZE);

    mutex_acquire(&lock);

    /* walk through the arenas, looking to see if the physical page belongs to it */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        while (allocated < count && ADDRESS_IN_ARENA(address, a)) {
            size_t index = (address - a->base) / PAGE_SIZE;

            DEBUG_ASSERT(index < a->size / PAGE_SIZE);

            vm_page_t *page = &a->page_array[index];
            if (page->flags & VM_PAGE_FLAG_NONFREE) {
                /* we hit an allocated page */
                break;
            }

            /* a free page must currently be linked on its arena's free list */
            DEBUG_ASSERT(list_in_list(&page->node));

            /* move it from the free list onto the caller's list */
            list_delete(&page->node);
            page->flags |= VM_PAGE_FLAG_NONFREE;
            list_add_tail(list, &page->node);

            a->free_count--;
            allocated++;
            address += PAGE_SIZE;
        }

        if (allocated == count)
            break;
    }

    mutex_release(&lock);
    return allocated;
}
205
/*
 * Return every page on |list| to its owning arena's free list.  Each
 * page is expected to be marked NONFREE and to have been removed from
 * any other list.  Returns the number of pages actually freed.
 *
 * NOTE(review): a page that matches no registered arena is removed from
 * |list| but neither freed nor counted -- it is silently dropped.
 * Confirm this is the intended behavior for stray pages.
 */
size_t pmm_free(struct list_node *list)
{
    LTRACEF("list %p\n", list);

    mutex_acquire(&lock);

    uint count = 0;
    while (!list_is_empty(list)) {
        vm_page_t *page = list_remove_head_type(list, vm_page_t, node);

        DEBUG_ASSERT(!list_in_list(&page->node));
        DEBUG_ASSERT(page->flags & VM_PAGE_FLAG_NONFREE);

        /* see which arena this page belongs to and add it */
        pmm_arena_t *a;
        list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
            if (PAGE_BELONGS_TO_ARENA(page, a)) {
                page->flags &= ~VM_PAGE_FLAG_NONFREE;

                list_add_head(&a->free_list, &page->node);
                a->free_count++;
                count++;
                break;
            }
        }
    }

    mutex_release(&lock);
    return count;
}
236
237size_t pmm_free_page(vm_page_t *page)
238{
239 DEBUG_ASSERT(page);
240
241 struct list_node list;
242 list_initialize(&list);
243
244 list_add_head(&list, &page->node);
245
246 return pmm_free(&list);
247}
248
249/* physically allocate a run from arenas marked as KMAP */
250void *pmm_alloc_kpages(uint count, struct list_node *list)
251{
252 LTRACEF("count %u\n", count);
253
254 // XXX do fast path for single page
255
256
257 paddr_t pa;
258 size_t alloc_count = pmm_alloc_contiguous(count, PAGE_SIZE_SHIFT, &pa, list);
259 if (alloc_count == 0)
260 return NULL;
261
262 return paddr_to_kvaddr(pa);
263}
264
265size_t pmm_free_kpages(void *_ptr, uint count)
266{
267 LTRACEF("ptr %p, count %u\n", _ptr, count);
268
269 uint8_t *ptr = (uint8_t *)_ptr;
270
271 struct list_node list;
272 list_initialize(&list);
273
274 while (count > 0) {
275 vm_page_t *p = address_to_page(kvaddr_to_paddr(ptr));
276 if (p) {
277 list_add_tail(&list, &p->node);
278 }
279
280 ptr += PAGE_SIZE;
281 count--;
282 }
283
284 return pmm_free(&list);
285}
286
/*
 * Allocate |count| contiguous physical pages aligned to (1 << alignment_log2)
 * bytes (clamped to at least PAGE_SIZE), searching only arenas flagged
 * PMM_ARENA_FLAG_KMAP.  On success the run's base physical address is
 * stored through |pa| (if non-NULL), the pages are appended to |list|
 * (if non-NULL), and |count| is returned.  Returns 0 if no run is found.
 */
size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list)
{
    LTRACEF("count %u, align %u\n", count, alignment_log2);

    if (count == 0)
        return 0;
    if (alignment_log2 < PAGE_SIZE_SHIFT)
        alignment_log2 = PAGE_SIZE_SHIFT;

    mutex_acquire(&lock);

    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        // XXX make this a flag to only search kmap?
        if (a->flags & PMM_ARENA_FLAG_KMAP) {
            /* walk the list starting at alignment boundaries.
             * calculate the starting offset into this arena, based on the
             * base address of the arena to handle the case where the arena
             * is not aligned on the same boundary requested.
             */
            paddr_t rounded_base = ROUNDUP(a->base, 1UL << alignment_log2);
            /* skip this arena if rounding overflowed or left it empty */
            if (rounded_base < a->base || rounded_base > a->base + a->size - 1)
                continue;

            /* first page index in this arena that satisfies the alignment */
            uint aligned_offset = (rounded_base - a->base) / PAGE_SIZE;
            uint start = aligned_offset;
            LTRACEF("starting search at aligned offset %u\n", start);
            LTRACEF("arena base 0x%lx size %zu\n", a->base, a->size);

retry:
            /* search while we're still within the arena and have a chance of finding a slot
               (start + count < end of arena) */
            while ((start < a->size / PAGE_SIZE) &&
                   ((start + count) <= a->size / PAGE_SIZE)) {
                vm_page_t *p = &a->page_array[start];
                /* check that all |count| pages from |start| are free */
                for (uint i = 0; i < count; i++) {
                    if (p->flags & VM_PAGE_FLAG_NONFREE) {
                        /* this run is broken, break out of the inner loop.
                         * start over at the next alignment boundary
                         */
                        start = ROUNDUP(start - aligned_offset + i + 1, 1UL << (alignment_log2 - PAGE_SIZE_SHIFT)) + aligned_offset;
                        goto retry;
                    }
                    p++;
                }

                /* we found a run */
                LTRACEF("found run from pn %u to %u\n", start, start + count);

                /* remove the pages from the run out of the free list */
                for (uint i = start; i < start + count; i++) {
                    p = &a->page_array[i];
                    DEBUG_ASSERT(!(p->flags & VM_PAGE_FLAG_NONFREE));
                    DEBUG_ASSERT(list_in_list(&p->node));

                    list_delete(&p->node);
                    p->flags |= VM_PAGE_FLAG_NONFREE;
                    a->free_count--;

                    if (list)
                        list_add_tail(list, &p->node);
                }

                if (pa)
                    *pa = a->base + start * PAGE_SIZE;

                mutex_release(&lock);

                return count;
            }
        }
    }

    mutex_release(&lock);

    LTRACEF("couldn't find run\n");
    return 0;
}
365
366static void dump_page(const vm_page_t *page)
367{
368 DEBUG_ASSERT(page);
369
370 printf("page %p: address 0x%lx flags 0x%x\n", page, page_to_address(page), page->flags);
371}
372
/*
 * Print an arena's metadata, optionally every page entry, and a summary
 * of its free ranges (contiguous runs of free pages).
 */
static void dump_arena(const pmm_arena_t *arena, bool dump_pages)
{
    DEBUG_ASSERT(arena);

    printf("arena %p: name '%s' base 0x%lx size 0x%zx priority %u flags 0x%x\n",
           arena, arena->name, arena->base, arena->size, arena->priority, arena->flags);
    printf("\tpage_array %p, free_count %zu\n",
           arena->page_array, arena->free_count);

    /* dump all of the pages */
    if (dump_pages) {
        for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
            dump_page(&arena->page_array[i]);
        }
    }

    /* dump the free pages */
    printf("\tfree ranges:\n");
    /* last == -1 means "not currently inside a free run"; otherwise it is
     * the page index where the current run started */
    ssize_t last = -1;
    for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
        if (page_is_free(&arena->page_array[i])) {
            if (last == -1) {
                last = i;
            }
        } else {
            if (last != -1) {
                /* run ended at page i; print it as [start, end) addresses */
                printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + i * PAGE_SIZE);
            }
            last = -1;
        }
    }

    /* a run extending to the end of the arena */
    if (last != -1) {
        printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + arena->size);
    }
}
409
410static int cmd_pmm(int argc, const cmd_args *argv)
411{
412 if (argc < 2) {
413notenoughargs:
414 printf("not enough arguments\n");
415usage:
416 printf("usage:\n");
417 printf("%s arenas\n", argv[0].str);
418 printf("%s alloc <count>\n", argv[0].str);
419 printf("%s alloc_range <address> <count>\n", argv[0].str);
420 printf("%s alloc_kpages <count>\n", argv[0].str);
421 printf("%s alloc_contig <count> <alignment>\n", argv[0].str);
422 printf("%s dump_alloced\n", argv[0].str);
423 printf("%s free_alloced\n", argv[0].str);
424 return ERR_GENERIC;
425 }
426
427 static struct list_node allocated = LIST_INITIAL_VALUE(allocated);
428
429 if (!strcmp(argv[1].str, "arenas")) {
430 pmm_arena_t *a;
431 list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
432 dump_arena(a, false);
433 }
434 } else if (!strcmp(argv[1].str, "alloc")) {
435 if (argc < 3) goto notenoughargs;
436
437 struct list_node list;
438 list_initialize(&list);
439
440 uint count = pmm_alloc_pages(argv[2].u, &list);
441 printf("alloc returns %u\n", count);
442
443 vm_page_t *p;
444 list_for_every_entry(&list, p, vm_page_t, node) {
445 printf("\tpage %p, address 0x%lx\n", p, page_to_address(p));
446 }
447
448 /* add the pages to the local allocated list */
449 struct list_node *node;
450 while ((node = list_remove_head(&list))) {
451 list_add_tail(&allocated, node);
452 }
453 } else if (!strcmp(argv[1].str, "dump_alloced")) {
454 vm_page_t *page;
455
456 list_for_every_entry(&allocated, page, vm_page_t, node) {
457 dump_page(page);
458 }
459 } else if (!strcmp(argv[1].str, "alloc_range")) {
460 if (argc < 4) goto notenoughargs;
461
462 struct list_node list;
463 list_initialize(&list);
464
465 uint count = pmm_alloc_range(argv[2].u, argv[3].u, &list);
466 printf("alloc returns %u\n", count);
467
468 vm_page_t *p;
469 list_for_every_entry(&list, p, vm_page_t, node) {
470 printf("\tpage %p, address 0x%lx\n", p, page_to_address(p));
471 }
472
473 /* add the pages to the local allocated list */
474 struct list_node *node;
475 while ((node = list_remove_head(&list))) {
476 list_add_tail(&allocated, node);
477 }
478 } else if (!strcmp(argv[1].str, "alloc_kpages")) {
479 if (argc < 3) goto notenoughargs;
480
481 void *ptr = pmm_alloc_kpages(argv[2].u, NULL);
482 printf("pmm_alloc_kpages returns %p\n", ptr);
483 } else if (!strcmp(argv[1].str, "alloc_contig")) {
484 if (argc < 4) goto notenoughargs;
485
486 struct list_node list;
487 list_initialize(&list);
488
489 paddr_t pa;
490 size_t ret = pmm_alloc_contiguous(argv[2].u, argv[3].u, &pa, &list);
491 printf("pmm_alloc_contiguous returns %zu, address 0x%lx\n", ret, pa);
492 printf("address %% align = 0x%lx\n", pa % argv[3].u);
493
494 /* add the pages to the local allocated list */
495 struct list_node *node;
496 while ((node = list_remove_head(&list))) {
497 list_add_tail(&allocated, node);
498 }
499 } else if (!strcmp(argv[1].str, "free_alloced")) {
500 size_t err = pmm_free(&allocated);
501 printf("pmm_free returns %zu\n", err);
502 } else {
503 printf("unknown command\n");
504 goto usage;
505 }
506
507 return NO_ERROR;
508}
509
/* register the 'pmm' console command (debug builds only) */
STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("pmm", "physical memory manager", &cmd_pmm)
#endif
STATIC_COMMAND_END(pmm);
515
516
517
518