// SPDX-License-Identifier: GPL-2.0
/*
 * Clang Control Flow Integrity (CFI) error and slowpath handling.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/gfp.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>

/* Compiler-defined handler names */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler	__cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler	__cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */

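/*
 * Report a CFI violation for call target @ptr: warn (rate-limited) when
 * CONFIG_CFI_PERMISSIVE is set, otherwise panic.
 */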
static inline void handle_cfi_failure(void *ptr)
{
	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
		WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
	else
		panic("CFI failure (target: %pS)\n", ptr);
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_CFI_CLANG_SHADOW
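/*
 * The shadow is a read-only, page-granular lookup table that maps each page
 * of module text to the page containing that module's __cfi_check() function.
 * It lets the slowpath resolve the check function with a simple array lookup
 * instead of the slower __module_address() walk.
 */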
struct shadow_range {
	/* Module address range */
	unsigned long mod_min_addr;
	unsigned long mod_max_addr;
	/* Module page range */
	unsigned long min_page;
	unsigned long max_page;
};

#define SHADOW_ORDER	2
#define SHADOW_PAGES	(1 << SHADOW_ORDER)
#define SHADOW_SIZE \
	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
#define SHADOW_INVALID	0xFFFF

struct cfi_shadow {
	/* Page range covered by the shadow */
	struct shadow_range r;
	/* Page offsets to __cfi_check functions in modules */
	u16 shadow[SHADOW_SIZE];
};

static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;

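/*
 * Convert an address to an index into the shadow array. Returns -1 if the
 * address lies below the covered range or cannot be addressed by the shadow.
 */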
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
	unsigned long index;
	unsigned long page = ptr >> PAGE_SHIFT;

	if (unlikely(page < s->r.min_page))
		return -1; /* Outside of module area */

	index = page - s->r.min_page;

	if (index >= SHADOW_SIZE)
		return -1; /* Cannot be addressed with shadow */

	return (int)index;
}

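/* Return the address of the __cfi_check() function recorded for slot @index */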
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
	int index)
{
	if (unlikely(index < 0 || index >= SHADOW_SIZE))
		return 0;

	if (unlikely(s->shadow[index] == SHADOW_INVALID))
		return 0;

	return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;
}

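/* Return the page address that shadow slot @index covers */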
static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
	int index)
{
	if (unlikely(index < 0 || index >= SHADOW_SIZE))
		return 0;

	return (s->r.min_page + index) << PAGE_SHIFT;
}

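/*
 * Initialize @next from @prev: carry over still-valid entries, remapping them
 * if the covered page range has shifted. Called with shadow_update_lock held.
 */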
static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
		struct cfi_shadow *next)
{
	int i, index, check;

	/* Mark everything invalid */
	memset(next->shadow, 0xFF, sizeof(next->shadow));

	if (!prev)
		return; /* No previous shadow */

	/* If the base address didn't change, update is not needed */
	if (prev->r.min_page == next->r.min_page) {
		memcpy(next->shadow, prev->shadow, sizeof(next->shadow));
		return;
	}

	/* Convert the previous shadow to the new address range */
	for (i = 0; i < SHADOW_SIZE; ++i) {
		if (prev->shadow[i] == SHADOW_INVALID)
			continue;

		index = ptr_to_shadow(next, shadow_to_page(prev, i));
		if (index < 0)
			continue;

		check = ptr_to_shadow(next,
				shadow_to_ptr(prev, prev->shadow[i]));
		if (check < 0)
			continue;

		next->shadow[index] = (u16)check;
	}
}

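/*
 * Record @mod in the shadow: every text page of the module gets the slot
 * index of the page that holds the module's __cfi_check() function, which
 * must be page aligned.
 */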
static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
{
	unsigned long ptr;
	unsigned long min_page_addr;
	unsigned long max_page_addr;
	unsigned long check = (unsigned long)mod->cfi_check;
	int check_index = ptr_to_shadow(s, check);

	if (unlikely((check & PAGE_MASK) != check))
		return; /* Must be page aligned */

	if (check_index < 0)
		return; /* Module not addressable with shadow */

	min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
	max_page_addr = (unsigned long)mod->core_layout.base +
			mod->core_layout.text_size;
	max_page_addr &= PAGE_MASK;

	/* For each page, store the check function index in the shadow */
	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
		int index = ptr_to_shadow(s, ptr);

		if (index >= 0) {
			/* Each page must only contain one module */
			WARN_ON(s->shadow[index] != SHADOW_INVALID);
			s->shadow[index] = (u16)check_index;
		}
	}
}

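/* Invalidate the shadow entries for @mod's text pages */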
static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
{
	unsigned long ptr;
	unsigned long min_page_addr;
	unsigned long max_page_addr;

	min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
	max_page_addr = (unsigned long)mod->core_layout.base +
			mod->core_layout.text_size;
	max_page_addr &= PAGE_MASK;

	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
		int index = ptr_to_shadow(s, ptr);

		if (index >= 0)
			s->shadow[index] = SHADOW_INVALID;
	}
}

typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *);

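/*
 * Replace the current shadow with a new read-only copy that covers the module
 * range [min_addr, max_addr], after applying @fn to add or remove @mod. The
 * old shadow is freed once an expedited RCU grace period has passed.
 */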
static void update_shadow(struct module *mod, unsigned long min_addr,
		unsigned long max_addr, update_shadow_fn fn)
{
	struct cfi_shadow *prev;
	struct cfi_shadow *next = (struct cfi_shadow *)
		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);

	if (WARN_ON(!next))
		return; /* Allocation failed; keep the previous shadow */

	next->r.mod_min_addr = min_addr;
	next->r.mod_max_addr = max_addr;
	next->r.min_page = min_addr >> PAGE_SHIFT;
	next->r.max_page = max_addr >> PAGE_SHIFT;

	mutex_lock(&shadow_update_lock);
	prev = rcu_dereference_protected(cfi_shadow, 1);
	prepare_next_shadow(prev, next);

	fn(next, mod);
	set_memory_ro((unsigned long)next, SHADOW_PAGES);
	rcu_assign_pointer(cfi_shadow, next);

	mutex_unlock(&shadow_update_lock);
	synchronize_rcu_expedited();

	if (prev) {
		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
		free_pages((unsigned long)prev, SHADOW_ORDER);
	}
}

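/* Update the shadow when a module is added to or removed from module space */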
void cfi_module_add(struct module *mod, unsigned long min_addr,
		unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, add_module_to_shadow);
}
EXPORT_SYMBOL_GPL(cfi_module_add);

void cfi_module_remove(struct module *mod, unsigned long min_addr,
		unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, remove_module_from_shadow);
}
EXPORT_SYMBOL_GPL(cfi_module_remove);

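/*
 * Look up the __cfi_check() function for @ptr in the shadow. Returns NULL if
 * there is no shadow or the address is not covered by it.
 */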
static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
		unsigned long ptr)
{
	int index;

	if (unlikely(!s))
		return NULL; /* No shadow available */

	if (ptr < s->r.mod_min_addr || ptr > s->r.mod_max_addr)
		return NULL; /* Not in a mapped module */

	index = ptr_to_shadow(s, ptr);
	if (index < 0)
		return NULL; /* Cannot be addressed with shadow */

	return (cfi_check_fn)shadow_to_ptr(s, index);
}
#endif /* CONFIG_CFI_CLANG_SHADOW */

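/* Slow path: resolve the check function through __module_address() */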
static inline cfi_check_fn find_module_cfi_check(void *ptr)
{
	cfi_check_fn f = CFI_CHECK_FN;
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)ptr);
	if (mod)
		f = mod->cfi_check;
	preempt_enable();

	return f;
}

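/*
 * Find the __cfi_check() function for an indirect call target. Prefers the
 * shadow lookup when CONFIG_CFI_CLANG_SHADOW is enabled and falls back to
 * find_module_cfi_check() otherwise. If RCU is not watching (e.g. in the
 * idle loop), rcu_nmi_enter() is used so the RCU read-side lookup is valid.
 */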
static inline cfi_check_fn find_cfi_check(void *ptr)
{
	bool rcu;
	cfi_check_fn f;

	rcu = rcu_is_watching();
	if (!rcu)
		rcu_nmi_enter();

#ifdef CONFIG_CFI_CLANG_SHADOW
	/* Look up the __cfi_check function to use */
	rcu_read_lock_sched();
	f = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow),
			(unsigned long)ptr);
	rcu_read_unlock_sched();

	if (f)
		goto out;

	/*
	 * Fall back to find_module_cfi_check, which works also for a larger
	 * module address space, but is slower.
	 */
#endif /* CONFIG_CFI_CLANG_SHADOW */

	f = find_module_cfi_check(ptr);

out:
	if (!rcu)
		rcu_nmi_exit();

	return f;
}

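/*
 * Slowpath entry point, called from compiler-generated code when the target
 * of an indirect call is not covered by the kernel's own CFI check, e.g. a
 * function in a loaded module. Dispatches to that module's __cfi_check().
 */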
void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
	cfi_check_fn check = find_cfi_check(ptr);

	if (likely(check))
		check(id, ptr, diag);
	else /* Don't allow unchecked modules */
		handle_cfi_failure(ptr);
}
EXPORT_SYMBOL_GPL(cfi_slowpath_handler);
#endif /* CONFIG_MODULES */

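/* Called from compiler-generated code when a CFI check fails */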
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL_GPL(cfi_failure_handler);

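/* Called from compiler-generated __cfi_check() functions when a check fails */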
void __cfi_check_fail(void *data, void *ptr)
{
	handle_cfi_failure(ptr);
}