/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once

/* some assembly #defines; they need to match the structure below */
#if IS_64BIT
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 8
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 16
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 24
#define __MMU_INITIAL_MAPPING_SIZE 40
#else
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 4
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 8
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 12
#define __MMU_INITIAL_MAPPING_SIZE 20
#endif
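
/*
 * These offsets let early boot assembly walk the mmu_initial_mappings
 * table before the C environment is up. A minimal illustrative fragment
 * (a sketch, not from this tree; arm64 syntax, register choices
 * arbitrary), assuming x0 points at an entry:
 *
 *   ldr  x1, [x0, #__MMU_INITIAL_MAPPING_PHYS_OFFSET]   // entry->phys
 *   ldr  x2, [x0, #__MMU_INITIAL_MAPPING_VIRT_OFFSET]   // entry->virt
 *   ldr  x3, [x0, #__MMU_INITIAL_MAPPING_SIZE_OFFSET]   // entry->size
 *   add  x0, x0, #__MMU_INITIAL_MAPPING_SIZE            // advance to next entry
 */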

/* flags for initial mapping struct */
#define MMU_INITIAL_MAPPING_TEMPORARY     (0x1)
#define MMU_INITIAL_MAPPING_FLAG_UNCACHED (0x2)
#define MMU_INITIAL_MAPPING_FLAG_DEVICE   (0x4)
#define MMU_INITIAL_MAPPING_FLAG_DYNAMIC  (0x8)  /* entry has to be patched up by platform_reset */

#ifndef ASSEMBLY

#include <sys/types.h>
#include <stdint.h>
#include <compiler.h>
#include <list.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/mmu.h>

__BEGIN_CDECLS

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE)

struct mmu_initial_mapping {
    paddr_t phys;
    vaddr_t virt;
    size_t size;
    unsigned int flags;
    const char *name;
};

/* Assert that the assembly macros above match this struct. */
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, phys) == __MMU_INITIAL_MAPPING_PHYS_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, virt) == __MMU_INITIAL_MAPPING_VIRT_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, size) == __MMU_INITIAL_MAPPING_SIZE_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, flags) == __MMU_INITIAL_MAPPING_FLAGS_OFFSET);
STATIC_ASSERT(sizeof(struct mmu_initial_mapping) == __MMU_INITIAL_MAPPING_SIZE);

/* The platform or target must fill out one of these tables to set up the
 * initial memory map for the kernel and enough I/O space to boot.
 */
extern struct mmu_initial_mapping mmu_initial_mappings[];
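
/*
 * A sketch of a platform-side table (addresses, sizes, and names here are
 * hypothetical, not from any particular target; the terminating zeroed
 * entry is an assumption about how the table is walked):
 *
 *   struct mmu_initial_mapping mmu_initial_mappings[] = {
 *       { .phys = 0,          .virt = KERNEL_ASPACE_BASE, .size = 512*1024*1024,
 *         .flags = 0,                               .name = "memory" },
 *       { .phys = 0xf0000000, .virt = 0xf0000000,   .size = 16*1024*1024,
 *         .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, .name = "peripherals" },
 *       { 0 },
 *   };
 */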

/* core per-page structure */
typedef struct vm_page {
    struct list_node node;

    uint flags : 8;
    uint ref : 24;
} vm_page_t;

#define VM_PAGE_FLAG_NONFREE (0x1)

/* kernel address space */
#ifndef KERNEL_ASPACE_BASE
#define KERNEL_ASPACE_BASE ((vaddr_t)0x80000000UL)
#endif
#ifndef KERNEL_ASPACE_SIZE
#define KERNEL_ASPACE_SIZE ((vaddr_t)0x80000000UL)
#endif

STATIC_ASSERT(KERNEL_ASPACE_BASE + (KERNEL_ASPACE_SIZE - 1) > KERNEL_ASPACE_BASE);

static inline bool is_kernel_address(vaddr_t va)
{
    return (va >= KERNEL_ASPACE_BASE && va <= (KERNEL_ASPACE_BASE + KERNEL_ASPACE_SIZE - 1));
}

/* user address space, defaults to below kernel space with a 16MB guard gap on either side */
#ifndef USER_ASPACE_BASE
#define USER_ASPACE_BASE ((vaddr_t)0x01000000UL)
#endif
#ifndef USER_ASPACE_SIZE
#define USER_ASPACE_SIZE ((vaddr_t)KERNEL_ASPACE_BASE - USER_ASPACE_BASE - 0x01000000UL)
#endif

STATIC_ASSERT(USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1) > USER_ASPACE_BASE);

static inline bool is_user_address(vaddr_t va)
{
    return (va >= USER_ASPACE_BASE && va <= (USER_ASPACE_BASE + USER_ASPACE_SIZE - 1));
}

/* physical allocator */
typedef struct pmm_arena {
    struct list_node node;
    const char *name;

    uint flags;
    uint priority;

    paddr_t base;
    size_t size;

    size_t free_count;

    struct vm_page *page_array;
    struct list_node free_list;
} pmm_arena_t;

#define PMM_ARENA_FLAG_KMAP (0x1) /* this arena is already mapped and useful for kallocs */

/* Add a pre-filled memory arena to the physical allocator. */
status_t pmm_add_arena(pmm_arena_t *arena);

/* Allocate count pages of physical memory, adding them to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.
 */
size_t pmm_alloc_pages(uint count, struct list_node *list);
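
/*
 * Example usage (a sketch; error handling kept minimal, and assuming
 * list.h's LIST_INITIAL_VALUE to set up the list head):
 *
 *   struct list_node pages = LIST_INITIAL_VALUE(pages);
 *   size_t allocated = pmm_alloc_pages(4, &pages);
 *   if (allocated < 4)
 *       pmm_free(&pages);  // give back any partial allocation
 */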

/* Allocate a specific range of physical pages, adding them to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.
 */
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list);

/* Free a list of physical pages.
 * Returns the number of pages freed.
 */
size_t pmm_free(struct list_node *list);

/* Helper routine for the above: free a single page. */
size_t pmm_free_page(vm_page_t *page);

/* Allocate a run of physically contiguous pages, aligned on a log2 byte boundary (0-31).
 * If the optional physical address pointer is passed, return the base address through it.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
size_t pmm_alloc_contiguous(uint count, uint8_t align_log2, paddr_t *pa, struct list_node *list);
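
/*
 * Example: grab 16 physically contiguous pages aligned to 64KB (2^16
 * bytes), capturing the physical base address (a sketch; assumes 4KB
 * pages, so 16 pages == 64KB):
 *
 *   paddr_t pa;
 *   size_t got = pmm_alloc_contiguous(16, 16, &pa, NULL);
 *   if (got == 16) {
 *       // pa is the 64KB-aligned base of the run
 *   }
 */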

/* Allocate a run of pages out of the kernel area and return the pointer in kernel space.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
void *pmm_alloc_kpages(uint count, struct list_node *list);

/* Helper routine for pmm_alloc_kpages. */
static inline void *pmm_alloc_kpage(void) { return pmm_alloc_kpages(1, NULL); }

/* Free count pages previously allocated with pmm_alloc_kpages. */
size_t pmm_free_kpages(void *ptr, uint count);
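
/*
 * Example round trip (a sketch):
 *
 *   void *ptr = pmm_alloc_kpages(2, NULL);
 *   if (ptr) {
 *       // ... use two pages of kernel-mapped memory ...
 *       pmm_free_kpages(ptr, 2);
 *   }
 */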

/* physical to virtual */
void *paddr_to_kvaddr(paddr_t pa);

/* virtual to physical */
paddr_t kvaddr_to_paddr(void *va);

/* virtual allocator */
typedef struct vmm_aspace {
    struct list_node node;
    char name[32];

    uint flags;

    vaddr_t base;
    size_t size;

    struct list_node region_list;
} vmm_aspace_t;

typedef struct vmm_region {
    struct list_node node;
    char name[32];

    uint flags;
    uint arch_mmu_flags;

    vaddr_t base;
    size_t size;

    struct list_node page_list;
} vmm_region_t;

#define VMM_REGION_FLAG_RESERVED 0x1
#define VMM_REGION_FLAG_PHYSICAL 0x2

/* grab a handle to the kernel address space */
extern vmm_aspace_t _kernel_aspace;
static inline vmm_aspace_t *vmm_get_kernel_aspace(void) {
    return &_kernel_aspace;
}

/* reserve a chunk of address space to prevent allocations from that space */
status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr);

/* allocate a region of virtual space that maps one or more runs of physical address
   space, given as an array of paddr_count physical addresses. the physical pages that
   back this are not allocated from the pmm. */
status_t vmm_alloc_physical_etc(vmm_aspace_t *aspace, const char *name, size_t size,
                                void **ptr, uint8_t align_log2, paddr_t *paddr,
                                uint paddr_count, uint vmm_flags, uint arch_mmu_flags);

/* allocate a region of virtual space that maps a single run of physical address space.
   the physical pages that back this are not allocated from the pmm. */
status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
                            void **ptr, uint8_t align_log2, paddr_t paddr,
                            uint vmm_flags, uint arch_mmu_flags);
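
/*
 * Example: map a device register page into the kernel address space
 * (a sketch; the physical address is a placeholder, and the exact
 * arch_mmu_flags value depends on the target's arch/mmu.h):
 *
 *   void *regs;
 *   status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "uart",
 *                                     PAGE_SIZE, &regs, 0, 0xf8001000,
 *                                     0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
 *   if (err == NO_ERROR) {
 *       // regs now points at the mapped registers
 *   }
 */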

/* allocate a region of memory backed by newly allocated contiguous physical memory */
status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size,
                              void **ptr, uint8_t align_log2, uint vmm_flags,
                              uint arch_mmu_flags);

/* allocate a region of memory backed by newly allocated physical memory */
status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr,
                   uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags);

/* Unmap a previously allocated region and free the physical memory pages backing it (if any). */
status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t va);
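
/*
 * Example: allocate a 64KB page-backed kernel buffer and release it
 * (a sketch; passing 0 for the flag arguments is assumed to select
 * the defaults):
 *
 *   void *buf;
 *   status_t err = vmm_alloc(vmm_get_kernel_aspace(), "scratch",
 *                            64 * 1024, &buf, 0, 0, 0);
 *   if (err == NO_ERROR) {
 *       // ... use buf ...
 *       vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)buf);
 *   }
 */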

/* For the region creation routines above: allocate virtual space at the passed-in pointer. */
#define VMM_FLAG_VALLOC_SPECIFIC 0x1

/* allocate a new address space */
status_t vmm_create_aspace(vmm_aspace_t **aspace, const char *name, uint flags);

/* destroy everything in the address space */
status_t vmm_free_aspace(vmm_aspace_t *aspace);

#define VMM_FLAG_ASPACE_KERNEL 0x1

__END_CDECLS

#endif // !ASSEMBLY