/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
| 24 | |
| 25 | #include <err.h> |
| 26 | #include <trace.h> |
| 27 | #include <arch/x86/mmu.h> |
| 28 | #include <platform.h> |
| 29 | #include "platform_p.h" |
| 30 | #include <platform/pc.h> |
| 31 | #include <platform/multiboot.h> |
| 32 | #include <platform/console.h> |
| 33 | #include <platform/keyboard.h> |
| 34 | #include <dev/pci.h> |
| 35 | #include <dev/uart.h> |
| 36 | #include <arch/x86.h> |
| 37 | #include <arch/mmu.h> |
| 38 | #include <malloc.h> |
| 39 | #include <string.h> |
| 40 | #include <assert.h> |
| 41 | #include <kernel/vm.h> |
| 42 | |
/* multiboot information structure handed to us by the bootloader */
extern multiboot_info_t *_multiboot_info;

/* fallback top of usable physical memory (16MB) if multiboot gives us nothing */
#define DEFAULT_MEMEND (16*1024*1024)

#ifdef WITH_KERNEL_VM
/* end of the kernel image, provided by the linker script */
extern int _end;
/* detected top of usable physical memory; refined by platform_init_multiboot_info() */
static uintptr_t _heap_end = (uintptr_t)DEFAULT_MEMEND;
#else
extern uintptr_t _heap_end;
#endif
/* kernel section boundary symbols, provided by the linker script */
extern uint64_t __code_start;
extern uint64_t __code_end;
extern uint64_t __rodata_start;
extern uint64_t __rodata_end;
extern uint64_t __data_start;
extern uint64_t __data_end;
extern uint64_t __bss_start;
extern uint64_t __bss_end;
extern void pci_init(void);
extern void arch_mmu_init(void);

/* Address width including virtual/physical address;
 * filled in from CPUID by platform_init_mmu_mappings() */
uint8_t g_vaddr_width = 0;
uint8_t g_paddr_width = 0;

/* Kernel global CR3 (physical address of the top-level page table) */
map_addr_t g_CR3 = 0;
| 70 | |
| 71 | void platform_init_mmu_mappings(void) |
| 72 | { |
| 73 | struct map_range range; |
| 74 | arch_flags_t access; |
| 75 | map_addr_t *init_table, phy_init_table; |
| 76 | uint32_t addr_width; |
| 77 | |
| 78 | /* getting the address width from CPUID instr */ |
| 79 | /* Bits 07-00: Physical Address width info */ |
| 80 | /* Bits 15-08: Linear Address width info */ |
| 81 | addr_width = x86_get_address_width(); |
| 82 | g_paddr_width = (uint8_t)(addr_width & 0xFF); |
| 83 | g_vaddr_width = (uint8_t)((addr_width >> 8) & 0xFF); |
| 84 | |
| 85 | /* Creating the First page in the page table hirerachy */ |
| 86 | /* Can be pml4, pdpt or pdt based on x86_64, x86 PAE mode & x86 non-PAE mode respectively */ |
| 87 | init_table = memalign(PAGE_SIZE, PAGE_SIZE); |
| 88 | ASSERT(init_table); |
| 89 | memset(init_table, 0, PAGE_SIZE); |
| 90 | |
| 91 | phy_init_table = (map_addr_t)X86_VIRT_TO_PHYS(init_table); |
| 92 | |
| 93 | /* kernel code section mapping */ |
| 94 | access = ARCH_MMU_FLAG_PERM_RO; |
| 95 | range.start_vaddr = range.start_paddr = (map_addr_t) &__code_start; |
| 96 | range.size = ((map_addr_t)&__code_end) - ((map_addr_t)&__code_start); |
| 97 | x86_mmu_map_range(phy_init_table, &range, access); |
| 98 | |
| 99 | /* kernel data section mapping */ |
| 100 | access = 0; |
| 101 | #if defined(ARCH_X86_64) || defined(PAE_MODE_ENABLED) |
| 102 | access |= ARCH_MMU_FLAG_PERM_NO_EXECUTE; |
| 103 | #endif |
| 104 | range.start_vaddr = range.start_paddr = (map_addr_t) &__data_start; |
| 105 | range.size = ((map_addr_t)&__data_end) - ((map_addr_t)&__data_start); |
| 106 | x86_mmu_map_range(phy_init_table, &range, access); |
| 107 | |
| 108 | /* kernel rodata section mapping */ |
| 109 | access = ARCH_MMU_FLAG_PERM_RO; |
| 110 | #if defined(ARCH_X86_64) || defined(PAE_MODE_ENABLED) |
| 111 | access |= ARCH_MMU_FLAG_PERM_NO_EXECUTE; |
| 112 | #endif |
| 113 | range.start_vaddr = range.start_paddr = (map_addr_t) &__rodata_start; |
| 114 | range.size = ((map_addr_t)&__rodata_end) - ((map_addr_t)&__rodata_start); |
| 115 | x86_mmu_map_range(phy_init_table, &range, access); |
| 116 | |
| 117 | /* kernel bss section and kernel heap mappings */ |
| 118 | access = 0; |
| 119 | #ifdef ARCH_X86_64 |
| 120 | access |= ARCH_MMU_FLAG_PERM_NO_EXECUTE; |
| 121 | #endif |
| 122 | range.start_vaddr = range.start_paddr = (map_addr_t) &__bss_start; |
| 123 | range.size = ((map_addr_t)_heap_end) - ((map_addr_t)&__bss_start); |
| 124 | x86_mmu_map_range(phy_init_table, &range, access); |
| 125 | |
| 126 | /* Mapping for BIOS, devices */ |
| 127 | access = 0; |
| 128 | range.start_vaddr = range.start_paddr = (map_addr_t) 0; |
| 129 | range.size = ((map_addr_t)&__code_start); |
| 130 | x86_mmu_map_range(phy_init_table, &range, access); |
| 131 | |
| 132 | /* Moving to the new CR3 */ |
| 133 | g_CR3 = (map_addr_t)phy_init_table; |
| 134 | x86_set_cr3((map_addr_t)phy_init_table); |
| 135 | } |
| 136 | |
| 137 | #if WITH_KERNEL_VM |
struct mmu_initial_mapping mmu_initial_mappings[] = {
    /* all of detected memory, identity-mapped; the size here is a
     * placeholder that initial_mapping_init() overwrites once the real
     * top of memory is known from multiboot */
    {
        .phys = MEMBASE,
        .virt = MEMBASE,
        .size = DEFAULT_MEMEND - MEMBASE,
        .flags = 0,
        .name = "memory"
    },

    /* null entry to terminate the list */
    { 0 }
};
| 151 | |
| 152 | /* set up the size of the identity map of physical ram to virtual at the base |
| 153 | * of the kernel to match what we detected in platform_init_multiboot_info() |
| 154 | */ |
| 155 | void initial_mapping_init(void) |
| 156 | { |
| 157 | /* tweak the amount of physical memory map we have mapped |
| 158 | * in the mmu_initial_mappings table, which is used by the vm |
| 159 | * for reverse lookups of memory in the kernel area */ |
| 160 | mmu_initial_mappings[0].size = _heap_end - mmu_initial_mappings[0].virt; |
| 161 | } |
| 162 | |
/* physical memory arena handed to the pmm; base/size are defaults that
 * mem_arena_init() overwrites before the arena is added */
static pmm_arena_t mem_arena = {
    .name = "memory",
    .base = MEMBASE, /* start 2MB into memory */
    .size = DEFAULT_MEMEND, /* default amount of memory in case we don't have multiboot */
    .priority = 1,
    .flags = PMM_ARENA_FLAG_KMAP
};
| 170 | |
| 171 | /* set up the size of the physical memory map based on the end of memory we detected in |
| 172 | * platform_init_multiboot_info() |
| 173 | */ |
| 174 | void mem_arena_init(void) |
| 175 | { |
| 176 | uintptr_t mem_base = ((uintptr_t)MEMBASE); |
| 177 | uintptr_t mem_size = (uintptr_t)_heap_end - (uintptr_t)mem_base; |
| 178 | |
| 179 | mem_arena.base = PAGE_ALIGN(mem_base); |
| 180 | mem_arena.size = PAGE_ALIGN(mem_size); |
| 181 | } |
| 182 | #endif |
| 183 | |
/*
 * Scan the multiboot info handed to us by the bootloader and raise
 * _heap_end (top of usable physical memory) above its compile-time
 * default. No-op if the bootloader passed no multiboot info.
 */
void platform_init_multiboot_info(void)
{
    if (_multiboot_info) {
        if (_multiboot_info->flags & MB_INFO_MEM_SIZE) {
            /* mem_upper is the amount of memory above 1MB, in KB */
            _heap_end = _multiboot_info->mem_upper * 1024;
        }

        if (_multiboot_info->flags & MB_INFO_MMAP) {
            /* mmap_addr points just past each entry's leading 'size'
             * field; back up 4 bytes so memory_map_t overlays the
             * entry including that field */
            memory_map_t *mmap = (memory_map_t *)(uintptr_t)(_multiboot_info->mmap_addr - 4);

            /* NOTE(review): this iteration assumes fixed-size entries;
             * the multiboot spec allows variable-sized entries advanced
             * by each entry's own 'size' field — confirm memory_map_t
             * matches what the bootloader emits */
            for (uint i = 0; i < _multiboot_info->mmap_length / sizeof(memory_map_t); i++) {

                if (mmap[i].type == MB_MMAP_TYPE_AVAILABLE && mmap[i].base_addr_low >= _heap_end) {
                    /* extend the top of memory across this available region */
                    _heap_end = mmap[i].base_addr_low + mmap[i].length_low;
                } else if (mmap[i].type != MB_MMAP_TYPE_AVAILABLE && mmap[i].base_addr_low >= _heap_end) {
                    /*
                     * break on first memory hole above default heap end for now.
                     * later we can add facilities for adding free chunks to the
                     * heap for each segregated memory region.
                     */
                    break;
                }
            }
        }
    }
}
| 210 | |
/*
 * Early platform bring-up, in strict order: debug uart, text console,
 * interrupt controller, timer, then memory discovery (and, with the vm
 * enabled, handing physical memory to the pmm).
 */
void platform_early_init(void)
{
    /* serial port first so debug output works as early as possible */
    platform_init_uart();

    /* get the text console working */
    platform_init_console();

    /* initialize the interrupt controller */
    platform_init_interrupts();

    /* initialize the timer */
    platform_init_timer();

    /* look at multiboot to determine our memory size */
    platform_init_multiboot_info();

#ifdef WITH_KERNEL_VM
    /* size the kernel's identity map and register physical memory
     * with the physical page allocator */
    initial_mapping_init();
    mem_arena_init();
    pmm_add_arena(&mem_arena);
#endif
}
| 234 | |
/*
 * Later platform init, run once the heap is up: full uart driver,
 * keyboard, pci, and finally the switch to the kernel's mmu mappings.
 */
void platform_init(void)
{
    uart_init();

    platform_init_keyboard();
#if defined(ARCH_X86)
    pci_init();
#endif

    /* MMU init for x86 Archs done after the heap is setup */
    // XXX move this into arch/
    arch_mmu_init();
    platform_init_mmu_mappings();
}
| 249 | |
/* vim: set noexpandtab: */