/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

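/*
 * Cover the whole shadow region with a single read-write zero page so
 * that instrumented code running before kasan_init() sees valid,
 * all-zero (unpoisoned) shadow for every address.
 */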
void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	int i;

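	/*
	 * Point every slot of the early shadow PTE table at the shared
	 * early shadow page. It stays writable until kasan_init()
	 * write-protects it.
	 */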
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

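	/*
	 * Wire every PMD entry covering the shadow region to that single
	 * early shadow PTE table.
	 */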
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
	early_trap_init();
}

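/*
 * Back the shadow of [start, end) with real memory: allocate a PTE
 * table per PMD entry and a fresh page for every PTE, install them,
 * then clear the newly mapped shadow.
 */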
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

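	/*
	 * Allocate a backing page for every PTE. These pages are reached
	 * only through the shadow mapping installed below, not through
	 * the direct map, so they may come from anywhere in physical
	 * memory (MEMBLOCK_ALLOC_ANYWHERE).
	 */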
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

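	/* Install one of the new PTE tables in each PMD entry. */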
	for (i = 0; i < n_pmds; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

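	/*
	 * Flush stale TLB entries so the memset below goes through the
	 * new mapping rather than the early shadow page, then start the
	 * region out unpoisoned.
	 */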
	local_flush_tlb_all();
	memset(start, 0, end - start);
}

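/*
 * Switch from the early, catch-all shadow to a real shadow map and
 * enable KASAN error reporting.
 */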
void __init kasan_init(void)
{
	int i;

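	/*
	 * The shadow of KASAN_START_VADDR must land exactly at
	 * KASAN_SHADOW_START, and the range populated below must cover
	 * all of vmalloc space.
	 */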
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}