/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 * Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 * Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 * Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 * Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK 0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PTE
#endif

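/*
 * Helpers that print the given reason only when it is relevant:
 * pti_print_if_insecure() reports only on CPUs affected by Meltdown,
 * pti_print_if_secure() only on CPUs that are not affected.
 */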
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

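/*
 * PTI operating mode chosen at boot: selected automatically based on the
 * CPU's Meltdown vulnerability, or forced off/on with the "pti="/"nopti"
 * command-line options (see pti_check_boottime_disable() below).
 */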
enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
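/*
 * The vsyscall page lives in the kernel-only half of the address space but
 * must stay reachable from legacy userspace. Copy its PTE into the user
 * page tables and mark the mapping user-accessible.
 */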
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __irqentry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures. Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions. Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);

/*
 * For some configurations, map all of kernel text into the user page
 * tables. This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web. But do not
	 * clone the areas past rodata; they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel. We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}