/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

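/*
 * The hardware ASID occupies TTBR1_EL1[63:48]; USER_ASID_FLAG marks the
 * userspace member of a kpti ASID pair (ASID bit 0, i.e. TTBR bit 48).
 */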
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

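/* Number of EL2 vector slots reserved for branch-predictor hardening. */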
#define BP_HARDEN_EL2_SLOTS 4

#ifndef __ASSEMBLY__

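/*
 * Per-mm architecture context: @id holds the ASID (and allocator
 * generation), @vdso is the user base address of the vDSO mapping and
 * @flags carries the MMCF_* bits.
 */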
typedef struct {
	atomic64_t	id;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)

static __always_inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

static inline bool arm64_kernel_use_ng_mappings(void)
{
	bool tx1_bug;

	/* What's a kpti? Use global mappings if we don't know. */
	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		return false;

	/*
	 * Note: this function is called before the CPU capabilities have
	 * been configured, so our early mappings will be global. If we
	 * later determine that kpti is required, then
	 * kpti_install_ng_mappings() will make them non-global.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return true;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return false;

	/*
	 * KASLR is enabled so we're going to be enabling kpti on non-broken
	 * CPUs regardless of their susceptibility to Meltdown. Rather
	 * than force everybody to go through the G -> nG dance later on,
	 * just put down non-global mappings from the beginning.
	 */
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		tx1_bug = false;
#ifndef MODULE
	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
						cavium_erratum_27456_cpus);
#endif
	} else {
		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
	}

	return !tx1_bug && kaslr_offset() > 0;
}

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;

	/*
	 * template_start is only used by the BHB mitigation to identify the
	 * hyp_vectors_slot sequence.
	 */
	const char *template_start;
};

#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
     defined(CONFIG_HARDEN_EL2_VECTORS))
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
extern atomic_t arm64_el2_vector_last_slot;
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void) { }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);

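/* Have init_mm use the boot page tables in init_pg_dir. */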
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif /* !__ASSEMBLY__ */
#endif