/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23#include <kernel/vm.h>
24#include "vm_priv.h"
25
26#include <trace.h>
27#include <err.h>
28#include <string.h>
29#include <lk/init.h>
30#include <lib/console.h>
31#include <arch/mmu.h>
32#include <debug.h>
33
34#define LOCAL_TRACE 0
35
36extern int _start;
37extern int _end;
38
39/* mark the physical pages backing a range of virtual as in use.
40 * allocate the physical pages and throw them away */
41static void mark_pages_in_use(vaddr_t va, size_t len)
42{
43 LTRACEF("va 0x%lx, len 0x%zx\n", va, len);
44
45 struct list_node list;
46 list_initialize(&list);
47
48 /* make sure we are inclusive of all of the pages in the address range */
49 len = PAGE_ALIGN(len + (va & (PAGE_SIZE - 1)));
50 va = ROUNDDOWN(va, PAGE_SIZE);
51
52 LTRACEF("aligned va 0x%lx, len 0x%zx\n", va, len);
53
54 for (size_t offset = 0; offset < len; offset += PAGE_SIZE) {
55 uint flags;
56 paddr_t pa;
57
58 status_t err = arch_mmu_query(va + offset, &pa, &flags);
59 if (err >= 0) {
60 //LTRACEF("va 0x%x, pa 0x%x, flags 0x%x, err %d\n", va + offset, pa, flags, err);
61
62 /* alloate the range, throw the results away */
63 pmm_alloc_range(pa, 1, &list);
64 } else {
65 panic("Could not find pa for va 0x%lx\n", va);
66 }
67 }
68}
69
70static void vm_init_preheap(uint level)
71{
72 LTRACE_ENTRY;
73
74 /* mark all of the kernel pages in use */
75 LTRACEF("marking all kernel pages as used\n");
76 mark_pages_in_use((vaddr_t)&_start, ((uintptr_t)&_end - (uintptr_t)&_start));
77
78 /* mark the physical pages used by the boot time allocator */
79 if (boot_alloc_end != boot_alloc_start) {
80 LTRACEF("marking boot alloc used from 0x%lx to 0x%lx\n", boot_alloc_start, boot_alloc_end);
81
82 mark_pages_in_use(boot_alloc_start, boot_alloc_end - boot_alloc_start);
83 }
84}
85
86static void vm_init_postheap(uint level)
87{
88 LTRACE_ENTRY;
89
90 vmm_init();
91
92 /* create vmm regions to cover what is already there from the initial mapping table */
93 struct mmu_initial_mapping *map = mmu_initial_mappings;
94 while (map->size > 0) {
95 if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY)) {
96 vmm_reserve_space(vmm_get_kernel_aspace(), map->name, map->size, map->virt);
97 }
98
99 map++;
100 }
101}
102
103void *paddr_to_kvaddr(paddr_t pa)
104{
105 /* slow path to do reverse lookup */
106 struct mmu_initial_mapping *map = mmu_initial_mappings;
107 while (map->size > 0) {
108 if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY) &&
109 pa >= map->phys &&
110 pa <= map->phys + map->size - 1) {
111 return (void *)(map->virt + (pa - map->phys));
112 }
113 map++;
114 }
115 return NULL;
116}
117
118paddr_t kvaddr_to_paddr(void *ptr)
119{
120 status_t rc;
121 paddr_t pa;
122
123 rc = arch_mmu_query((vaddr_t)ptr, &pa, NULL);
124 if (rc)
125 return (paddr_t) NULL;
126 return pa;
127}
128
129static int cmd_vm(int argc, const cmd_args *argv)
130{
131 if (argc < 2) {
132notenoughargs:
133 printf("not enough arguments\n");
134usage:
135 printf("usage:\n");
136 printf("%s phys2virt <address>\n", argv[0].str);
137 printf("%s virt2phys <address>\n", argv[0].str);
138 printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
139 printf("%s unmap <virt> <count>\n", argv[0].str);
140 return ERR_GENERIC;
141 }
142
143 if (!strcmp(argv[1].str, "phys2virt")) {
144 if (argc < 3) goto notenoughargs;
145
146 void *ptr = paddr_to_kvaddr(argv[2].u);
147 printf("paddr_to_kvaddr returns %p\n", ptr);
148 } else if (!strcmp(argv[1].str, "virt2phys")) {
149 if (argc < 3) goto notenoughargs;
150
151 paddr_t pa;
152 uint flags;
153 status_t err = arch_mmu_query(argv[2].u, &pa, &flags);
154 printf("arch_mmu_query returns %d\n", err);
155 if (err >= 0) {
156 printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
157 }
158 } else if (!strcmp(argv[1].str, "map")) {
159 if (argc < 6) goto notenoughargs;
160
161 int err = arch_mmu_map(argv[3].u, argv[2].u, argv[4].u, argv[5].u);
162 printf("arch_mmu_map returns %d\n", err);
163 } else if (!strcmp(argv[1].str, "unmap")) {
164 if (argc < 4) goto notenoughargs;
165
166 int err = arch_mmu_unmap(argv[2].u, argv[3].u);
167 printf("arch_mmu_unmap returns %d\n", err);
168 } else {
169 printf("unknown command\n");
170 goto usage;
171 }
172
173 return NO_ERROR;
174}
175
/* register the 'vm' console command (debug builds only) */
STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vm", "vm commands", &cmd_vm)
#endif
STATIC_COMMAND_END(vm);

/* run vm_init_preheap one level before heap init, vm_init_postheap at the vm level */
LK_INIT_HOOK(vm_preheap, &vm_init_preheap, LK_INIT_LEVEL_HEAP - 1);
LK_INIT_HOOK(vm, &vm_init_postheap, LK_INIT_LEVEL_VM);