// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up normal shadow
 *     memory.
 *   - Later it is reused as the zero shadow, to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by kasan
 *     (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

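/*
 * Early shadow page tables, one per level the architecture actually has.
 * The kasan_*_table() helpers report whether a page-table entry points at
 * the shared early shadow table one level down; the removal code below
 * uses them to tell the shared early shadow tables apart from page tables
 * that were allocated privately and may therefore be freed.
 */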
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

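/*
 * Boot-time allocation helper: returns size-aligned, zeroed memory from
 * memblock, above MAX_DMA_ADDRESS. Only used before the slab allocator
 * is available.
 */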
static __init void *early_alloc(size_t size, int node)
{
	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   BOOTMEM_ALLOC_ACCESSIBLE, node);
}

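/*
 * Point every PTE in [addr, end) at the shared zero shadow page. The
 * mapping is write-protected: the zero shadow is only ever read.
 */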
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

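/*
 * Populate [addr, end) at the pmd level. PMD-aligned chunks of at least
 * PMD_SIZE can point at the shared early shadow PTE table wholesale;
 * smaller pieces get a private PTE table (from slab once it is up, from
 * memblock otherwise), which zero_pte_populate() then fills.
 */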
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm, addr);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

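/*
 * Same idea one level up: PUD-sized, PUD-aligned chunks are wired to the
 * shared early shadow pmd/pte tables; partial chunks recurse into
 * zero_pmd_populate().
 */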
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

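/*
 * And once more at the p4d level: fully covered entries reuse the shared
 * early shadow pud/pmd/pte tables, partially covered ones recurse into
 * zero_pud_populate().
 */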
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;
			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point.
			 *
			 * The [pud,pmd]_populate*() calls below are needed
			 * only for 3- and 2-level page tables, where we
			 * don't have real puds/pmds; there pgd_populate()
			 * and pud_populate() are no-ops.
			 *
			 * The ifndef is required to avoid build breakage:
			 * with 5level-fixup.h, pgd_populate() is not a no-op
			 * and references kasan_early_shadow_p4d, which is
			 * not defined unless 5-level paging is enabled.
			 *
			 * The ifndef can be dropped once all KASAN-enabled
			 * architectures switch to pgtable-nop4d.h.
			 */
#ifndef __ARCH_HAS_5LEVEL_HACK
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
#endif
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

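/*
 * The kasan_free_*() helpers free a page-table page only once every entry
 * in it is none, then clear the upper-level entry that pointed to it.
 * If any entry is still in use they do nothing.
 */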
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

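/*
 * Clear the zero-shadow PTEs in [addr, end). Every present entry here is
 * expected to point at kasan_early_shadow_page; anything else is left in
 * place with a warning, since it means real shadow memory is mapped in a
 * range that should only ever have carried zero shadow.
 */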
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

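/*
 * The remove helpers for the higher levels share one pattern: an entry
 * that points at a shared early shadow table is simply cleared, and only
 * when the whole range it maps is being removed; any other table is
 * walked recursively and then freed via kasan_free_*() once it is empty.
 */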
static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE))
				pmd_clear(pmd);
			continue;
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE))
				pud_clear(pud);
			continue;
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE))
				p4d_clear(p4d);
			continue;
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

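/**
 * kasan_remove_zero_shadow - unmap zero shadow from a memory range
 * @start: start of the original (non-shadow) memory range
 * @size: size of the original memory range
 *
 * Both @start and @size must be aligned to KASAN_SHADOW_SCALE_SIZE *
 * PAGE_SIZE, i.e. the range must cover whole shadow pages.
 */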
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE))
				pgd_clear(pgd);
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

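/**
 * kasan_add_zero_shadow - back a memory range with the zero shadow page
 * @start: start of the memory range to cover
 * @size: size of the memory range to cover
 *
 * Both @start and @size must be aligned to KASAN_SHADOW_SCALE_SIZE *
 * PAGE_SIZE. On failure, any partially populated shadow is removed again.
 */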
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		/*
		 * kasan_remove_zero_shadow() expects the original address
		 * and size, not the shadow ones; it translates to shadow
		 * addresses itself.
		 */
		kasan_remove_zero_shadow(start, size);
	return ret;
}