/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
24
25#include <debug.h>
26#include <sys/types.h>
27#include <compiler.h>
28#include <arch.h>
29#include <arch/x86.h>
30#include <arch/x86/mmu.h>
31#include <stdlib.h>
32#include <string.h>
33#include <arch/mmu.h>
34#include <assert.h>
35#include <err.h>
36#include <arch/arch_ops.h>
37
38extern map_addr_t g_CR3;
39
40#ifdef PAE_MODE_ENABLED
41/* PDP table address is 32 bit wide when on PAE mode, but the PDP entries are 64 bit wide */
42static inline map_addr_t get_pdp_entry_from_pdp_table(vaddr_t vaddr, map_addr_t pdpt)
43{
44 uint32_t pdp_index;
45 map_addr_t *pdp_table;
46
47 pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
48 pdp_table = (map_addr_t *)(pdpt & X86_PDPT_ADDR_MASK);
49 return X86_PHYS_TO_VIRT(pdp_table[pdp_index]);
50}
51
52static inline map_addr_t get_pfn_from_pt(map_addr_t pt)
53{
54 map_addr_t pfn;
55
56 pfn = (pt & X86_2MB_PAGE_FRAME);
57 return X86_PHYS_TO_VIRT(pfn);
58}
59
60#else
61static inline map_addr_t get_pfn_from_pde(map_addr_t pde)
62{
63 map_addr_t pfn;
64
65 pfn = (pde & X86_4MB_PAGE_FRAME);
66 return X86_PHYS_TO_VIRT(pfn);
67}
68#endif
69
70static inline map_addr_t get_pd_entry_from_pd_table(vaddr_t vaddr, map_addr_t pdt)
71{
72 uint32_t pd_index;
73 map_addr_t *pd_table;
74
75 pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
76 pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
77 return X86_PHYS_TO_VIRT(pd_table[pd_index]);
78}
79
80static inline map_addr_t get_pt_entry_from_page_table(vaddr_t vaddr, map_addr_t pt)
81{
82 uint32_t pt_index;
83 map_addr_t *pt_table;
84
85 pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
86 pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
87 return X86_PHYS_TO_VIRT(pt_table[pt_index]);
88}
89
90static inline map_addr_t get_pfn_from_pte(map_addr_t pte)
91{
92 map_addr_t pfn;
93
94 pfn = (pte & X86_PG_FRAME);
95 return X86_PHYS_TO_VIRT(pfn);
96}
97
98/**
99 * @brief Returning the x86 arch flags from generic mmu flags
100 */
101arch_flags_t get_x86_arch_flags(arch_flags_t flags)
102{
103 arch_flags_t arch_flags = 0;
104
105 if (!(flags & ARCH_MMU_FLAG_PERM_RO))
106 arch_flags |= X86_MMU_PG_RW;
107
108 if (flags & ARCH_MMU_FLAG_PERM_USER)
109 arch_flags |= X86_MMU_PG_U;
110
111 if (flags & ARCH_MMU_FLAG_UNCACHED)
112 arch_flags |= X86_MMU_CACHE_DISABLE;
113
114#ifdef PAE_MODE_ENABLED
115 if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)
116 arch_flags |= X86_MMU_PG_NX;
117#endif
118 return arch_flags;
119}
120
121/**
122 * @brief Returning the generic mmu flags from x86 arch flags
123 */
124uint get_arch_mmu_flags(arch_flags_t flags)
125{
126 arch_flags_t mmu_flags = 0;
127
128 if (!(flags & X86_MMU_PG_RW))
129 mmu_flags |= ARCH_MMU_FLAG_PERM_RO;
130
131 if (flags & X86_MMU_PG_U)
132 mmu_flags |= ARCH_MMU_FLAG_PERM_USER;
133
134 if (flags & X86_MMU_CACHE_DISABLE)
135 mmu_flags |= ARCH_MMU_FLAG_UNCACHED;
136
137#ifdef PAE_MODE_ENABLED
138 if (flags & X86_MMU_PG_NX)
139 mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
140#endif
141 return (uint)mmu_flags;
142}
143
/**
 * @brief Walk the page table structures - supported for both PAE & non-PAE modes
 *
 * Walks the paging hierarchy rooted at init_table for vaddr.
 *
 * @param init_table        top-level table (pdpt in PAE mode, pdt otherwise)
 * @param vaddr             virtual address to look up
 * @param ret_level         out: level reached (PF_L on success, otherwise the
 *                          deepest level whose entry was still present)
 * @param mmu_flags         out: generic ARCH_MMU_FLAG_* bits of the mapping
 * @param last_valid_entry  out: on success, the mapped physical address
 *                          (page frame + in-page offset); on ERR_NOT_FOUND,
 *                          the last table entry that was present
 *
 * @return NO_ERROR, ERR_INVALID_ARGS on NULL out-pointers, or ERR_NOT_FOUND
 *         when some level's entry lacks the present bit.
 */
status_t x86_mmu_get_mapping(map_addr_t init_table, vaddr_t vaddr, uint32_t *ret_level,
                arch_flags_t *mmu_flags, map_addr_t *last_valid_entry)
{
    map_addr_t pt, pte, pdt;
#ifdef PAE_MODE_ENABLED
    map_addr_t pdpt;
#endif

    DEBUG_ASSERT(init_table);
    if ((!ret_level) || (!last_valid_entry) || (!mmu_flags)) {
        return ERR_INVALID_ARGS;
    }

    *mmu_flags = 0;

#ifdef PAE_MODE_ENABLED
    pdpt = init_table; /* First level table in PAE mode is pdpt */
    *ret_level = PDP_L;
    *last_valid_entry = pdpt;

    pdt = get_pdp_entry_from_pdp_table(vaddr, pdpt);
    if ((pdt & X86_MMU_PG_P) == 0) {
        *ret_level = PDP_L;
        *last_valid_entry = pdpt;
        return ERR_NOT_FOUND;
    }

    pt = get_pd_entry_from_pd_table(vaddr, pdt);
    if ((pt & X86_MMU_PG_P) == 0) {
        *ret_level = PD_L;
        *last_valid_entry = pdt;
        return ERR_NOT_FOUND;
    }
#else
    pdt = init_table; /* First table in non PAE mode is pdt */
    *ret_level = PD_L;
    *last_valid_entry = pdt;

    pt = get_pd_entry_from_pd_table(vaddr, pdt);
    if ((pt & X86_MMU_PG_P) == 0)
        return ERR_NOT_FOUND;
#endif

    /* 4 MB pages (non PAE mode) and 2 MB pages (PAE mode) */
    /* In this case, the page directory entry is NOT actually a PT (page table) */
    if (pt & X86_MMU_PG_PS) {
#ifdef PAE_MODE_ENABLED
        /* Getting the Page frame & adding the 2MB page offset from the vaddr */
        *last_valid_entry = get_pfn_from_pt(pt) + (vaddr & PAGE_OFFSET_MASK_2MB);
#else
        /* Getting the Page frame & adding the 4MB page offset from the vaddr */
        *last_valid_entry = get_pfn_from_pde(pt) + (vaddr & PAGE_OFFSET_MASK_4MB);
#endif
        /* NOTE(review): the raw entry is run through X86_PHYS_TO_VIRT before the
         * flag bits are masked out — confirm the phys-to-virt translation
         * preserves the low flag bits on this platform. */
        *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pt)) & X86_FLAGS_MASK);
        goto last;
    }

    /* 4 KB pages */
    pte = get_pt_entry_from_page_table(vaddr, pt);
    if ((pte & X86_MMU_PG_P) == 0) {
        *ret_level = PT_L;
        *last_valid_entry = pt;
        return ERR_NOT_FOUND;
    }

    /* Getting the Page frame & adding the 4KB page offset from the vaddr */
    *last_valid_entry = get_pfn_from_pte(pte) + (vaddr & PAGE_OFFSET_MASK_4KB);
    *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pte)) & X86_FLAGS_MASK);
last:
    *ret_level = PF_L;
    return NO_ERROR;
}
220
221/**
222 * Walk the page table structures to see if the mapping between a virtual address
223 * and a physical address exists. Also, check the flags.
224 *
225 */
226status_t x86_mmu_check_mapping(map_addr_t init_table, map_addr_t paddr,
227 vaddr_t vaddr, arch_flags_t in_flags,
228 uint32_t *ret_level, arch_flags_t *ret_flags,
229 map_addr_t *last_valid_entry)
230{
231 status_t status;
232 arch_flags_t existing_flags = 0;
233
234 DEBUG_ASSERT(init_table);
235 if ((!ret_level) || (!last_valid_entry) || (!ret_flags) ||
236 (!IS_ALIGNED(vaddr, PAGE_SIZE)) ||
237 (!IS_ALIGNED(paddr, PAGE_SIZE))) {
238 return ERR_INVALID_ARGS;
239 }
240
241 status = x86_mmu_get_mapping(init_table, vaddr, ret_level, &existing_flags, last_valid_entry);
242 if (status || ((*last_valid_entry) != paddr)) {
243 /* We did not reach till we check the access flags for the mapping */
244 *ret_flags = in_flags;
245 return ERR_NOT_FOUND;
246 }
247
248 /* Checking the access flags for the mapped address. If it is not zero, then
249 * the access flags are different & the return flag will have those access bits
250 * which are different.
251 */
252 *ret_flags = (in_flags ^ get_x86_arch_flags(existing_flags)) & X86_DIRTY_ACCESS_MASK;
253
254 if (!(*ret_flags))
255 return NO_ERROR;
256
257 return ERR_NOT_FOUND;
258}
259
260#ifdef PAE_MODE_ENABLED
/* Install a newly allocated page-directory `m` into the PDP slot covering vaddr.
 * NOTE(review): `flags` is accepted but never applied — only the present bit is
 * set. Presumably intentional because a legacy-PAE PDPTE supports a restricted
 * flag set; confirm against the paging spec. */
static void update_pdp_entry(vaddr_t vaddr, map_addr_t pdpt, map_addr_t *m, arch_flags_t flags)
{
    uint32_t pdp_index;

    map_addr_t *pdp_table = (map_addr_t *)(pdpt & X86_PG_FRAME);
    pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
    pdp_table[pdp_index] = (map_addr_t)m;
    pdp_table[pdp_index] |= X86_MMU_PG_P;
}
270#endif
271
272static void update_pt_entry(vaddr_t vaddr, map_addr_t paddr, map_addr_t pt, arch_flags_t flags)
273{
274 uint32_t pt_index;
275
276 map_addr_t *pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
277 pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
278 pt_table[pt_index] = paddr;
279 pt_table[pt_index] |= flags | X86_MMU_PG_P; /* last level - actual page being mapped */
280 if (!(flags & X86_MMU_PG_U))
281 pt_table[pt_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
282}
283
284static void update_pd_entry(vaddr_t vaddr, map_addr_t pdt, map_addr_t *m, arch_flags_t flags)
285{
286 uint32_t pd_index;
287
288 map_addr_t *pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
289 pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
290 pd_table[pd_index] = (map_addr_t)m;
291 pd_table[pd_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
292 if (flags & X86_MMU_PG_U)
293 pd_table[pd_index] |= X86_MMU_PG_U;
294 else
295 pd_table[pd_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
296}
297
298/**
299 * @brief Allocating a new page table
300 */
301static map_addr_t *_map_alloc_page(void)
302{
303 map_addr_t *page_ptr = memalign(PAGE_SIZE, PAGE_SIZE);
304
305 if (page_ptr)
306 memset(page_ptr, 0, PAGE_SIZE);
307
308 return page_ptr;
309}
310
311addr_t *x86_create_new_cr3(void)
312{
313 map_addr_t *kernel_table, *new_table = NULL;
314
315 if (!g_CR3)
316 return 0;
317
318 kernel_table = (map_addr_t *)X86_PHYS_TO_VIRT(g_CR3);
319
320 /* Allocate a new Page to generate a new paging structure for a new CR3 */
321 new_table = _map_alloc_page();
322 ASSERT(new_table);
323
324 /* Copying the kernel mapping as-is */
325 memcpy(new_table, kernel_table, PAGE_SIZE);
326
327 return (addr_t *)new_table;
328}
329
330/**
331 * @brief Returning the kernel CR3
332 */
333map_addr_t get_kernel_cr3(void)
334{
335 return g_CR3;
336}
337
/**
 * @brief Add a new mapping for the given virtual address & physical address
 *
 * This is a API which handles the mapping b/w a virtual address & physical address
 * either by checking if the mapping already exists and is valid OR by adding a
 * new mapping with the required flags.
 *
 * Intermediate tables (pd table in PAE mode, page table in both modes) are
 * allocated on demand with _map_alloc_page().
 *
 * @param init_table  top-level table (pdpt in PAE mode, pdt otherwise)
 * @param paddr       page-aligned physical address to map
 * @param vaddr       page-aligned virtual address to map
 * @param mmu_flags   generic ARCH_MMU_FLAG_* bits for the mapping
 *
 * @return NO_ERROR, ERR_INVALID_ARGS on misaligned addresses, or
 *         ERR_NO_MEMORY when a table allocation fails.
 */
status_t x86_mmu_add_mapping(map_addr_t init_table, map_addr_t paddr,
                vaddr_t vaddr, arch_flags_t mmu_flags)
{
#ifdef PAE_MODE_ENABLED
    map_addr_t pdt;
    uint32_t pd_new = 0;   /* set when a pd table was allocated in this call */
#endif
    map_addr_t pt, *m = NULL;
    status_t ret = NO_ERROR;

    DEBUG_ASSERT(init_table);
    if ((!IS_ALIGNED(vaddr, PAGE_SIZE)) || (!IS_ALIGNED(paddr, PAGE_SIZE)) )
        return ERR_INVALID_ARGS;

#ifdef PAE_MODE_ENABLED
    pdt = get_pdp_entry_from_pdp_table(vaddr, init_table);
    if ((pdt & X86_MMU_PG_P) == 0) {
        /* Creating a new pd table */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            goto clean;
        }
        update_pdp_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags));
        pdt = (map_addr_t)m;
        pd_new = 1;
    }

    /* A freshly created pd table has no present entries, so skip the lookup;
     * note `pt` is only read below when pd_new is 0 (short-circuit). */
    if (!pd_new)
        pt = get_pd_entry_from_pd_table(vaddr, pdt);

    if (pd_new || (pt & X86_MMU_PG_P) == 0) {
        /* Creating a new pt */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            if (pd_new)
                goto clean_pd;
            goto clean;
        }

        update_pd_entry(vaddr, pdt, m, get_x86_arch_flags(mmu_flags));
        pt = (map_addr_t)m;
    }
#else
    pt = get_pd_entry_from_pd_table(vaddr, init_table);
    if ((pt & X86_MMU_PG_P) == 0) {
        /* Creating a new pt */
        m = _map_alloc_page();
        if (m == NULL) {
            ret = ERR_NO_MEMORY;
            goto clean;
        }

        update_pd_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags));
        pt = (map_addr_t)m;
    }
#endif

    /* Updating the page table entry with the paddr and access flags required for the mapping */
    update_pt_entry(vaddr, paddr, pt, get_x86_arch_flags(mmu_flags));
    ret = NO_ERROR;
#ifdef PAE_MODE_ENABLED
    goto clean;

clean_pd:
    /* NOTE(review): this frees the pd table allocated above, but the PDP entry
     * installed by update_pdp_entry() still points at the freed page and keeps
     * its present bit — looks like a dangling table reference; verify. */
    if (pd_new)
        free((map_addr_t *)pdt);
#endif
clean:
    return ret;
}
418
/**
 * @brief x86 MMU unmap an entry in the page tables recursively and clear out tables
 *
 * Recurses from `level` down to the page frame, clears the present bit of the
 * entry covering vaddr, and frees a next-level table once every entry in it is
 * non-present.
 *
 * @param vaddr        virtual address whose mapping is removed
 * @param level        paging level of table_entry (PDP_L/PD_L/PT_L/PF_L)
 * @param table_entry  entry referencing the table at this level
 */
static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, map_addr_t table_entry)
{
    uint32_t offset = 0, next_level_offset = 0;
    map_addr_t *table, *next_table_addr, value;

    next_table_addr = NULL;
    table = (map_addr_t *)(X86_VIRT_TO_PHYS(table_entry) & X86_PG_FRAME);

    switch (level) {
#ifdef PAE_MODE_ENABLED
    case PDP_L:
        offset = ((vaddr >> PDP_SHIFT) & ((1 << PDPT_ADDR_OFFSET) - 1));
        next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
        /* NOTE(review): the present bit is tested on the PHYS_TO_VIRT-translated
         * value rather than the raw entry — confirm the translation preserves
         * the low flag bits (same pattern repeats below). */
        if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
            return;
        break;
#endif
    case PD_L:
        offset = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
        next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
        if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
            return;
        break;
    case PT_L:
        offset = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
        next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
        if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
            return;
        break;
    case PF_L:
        /* Reached page frame, Let's go back */
    default:
        return;
    }

    /* Recurse one level down for the entry covering vaddr */
    level -= 1;
    x86_mmu_unmap_entry(vaddr, level, (map_addr_t)next_table_addr);
    level += 1;

    next_table_addr = (map_addr_t *)((map_addr_t)(X86_VIRT_TO_PHYS(next_table_addr)) & X86_PG_FRAME);
    if (level > PT_L) {
        /* Check all entries of next level table for present bit */
        for (next_level_offset = 0; next_level_offset < NO_OF_PT_ENTRIES; next_level_offset++) {
            if ((next_table_addr[next_level_offset] & X86_MMU_PG_P) != 0)
                return; /* There is an entry in the next level table */
        }
        /* Next-level table is fully empty — release it */
        free(next_table_addr);
    }
    /* All present bits for all entries in next level table for this address are 0 */
    if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) != 0) {
        /* Clear the present bit with interrupts disabled so the entry is never
         * observed half-updated on this CPU */
        arch_disable_ints();
        value = table[offset];
        value = value & X86_PTE_NOT_PRESENT;
        table[offset] = value;
        arch_enable_ints();
    }
}
480
481status_t x86_mmu_unmap(map_addr_t init_table, vaddr_t vaddr, uint count)
482{
483 vaddr_t next_aligned_v_addr;
484
485 DEBUG_ASSERT(init_table);
486 if (!IS_ALIGNED(vaddr, PAGE_SIZE))
487 return ERR_INVALID_ARGS;
488
489 if (count == 0)
490 return NO_ERROR;
491
492 next_aligned_v_addr = vaddr;
493 while (count > 0) {
494#ifdef PAE_MODE_ENABLED
495 x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAE_PAGING_LEVELS, init_table);
496#else
497 x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAGING_LEVELS, init_table);
498#endif
499 next_aligned_v_addr += PAGE_SIZE;
500 count--;
501 }
502 return NO_ERROR;
503}
504
505int arch_mmu_unmap(vaddr_t vaddr, uint count)
506{
507 map_addr_t init_table_from_cr3;
508
509 if (!IS_ALIGNED(vaddr, PAGE_SIZE))
510 return ERR_INVALID_ARGS;
511
512 if (count == 0)
513 return NO_ERROR;
514
515 DEBUG_ASSERT(x86_get_cr3());
516 init_table_from_cr3 = x86_get_cr3();
517
518 return (x86_mmu_unmap(init_table_from_cr3, vaddr, count));
519}
520
521/**
522 * @brief Mapping a section/range with specific permissions
523 *
524 */
525status_t x86_mmu_map_range(map_addr_t init_table, struct map_range *range, arch_flags_t flags)
526{
527 vaddr_t next_aligned_v_addr;
528 map_addr_t next_aligned_p_addr;
529 status_t map_status;
530 uint32_t no_of_pages, index;
531
532 DEBUG_ASSERT(init_table);
533 if (!range)
534 return ERR_INVALID_ARGS;
535
536 /* Calculating the number of 4k pages */
537 if (IS_ALIGNED(range->size, PAGE_SIZE))
538 no_of_pages = (range->size) >> PAGE_DIV_SHIFT;
539 else
540 no_of_pages = ((range->size) >> PAGE_DIV_SHIFT) + 1;
541
542 next_aligned_v_addr = range->start_vaddr;
543 next_aligned_p_addr = range->start_paddr;
544
545 for (index = 0; index < no_of_pages; index++) {
546 map_status = x86_mmu_add_mapping(init_table, next_aligned_p_addr, next_aligned_v_addr, flags);
547 if (map_status) {
548 dprintf(SPEW, "Add mapping failed with err=%d\n", map_status);
549 /* Unmap the partial mapping - if any */
550 x86_mmu_unmap(init_table, range->start_vaddr, index);
551 return map_status;
552 }
553 next_aligned_v_addr += PAGE_SIZE;
554 next_aligned_p_addr += PAGE_SIZE;
555 }
556
557 return NO_ERROR;
558}
559
560status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
561{
562 uint32_t ret_level, current_cr3_val;
563 map_addr_t last_valid_entry;
564 arch_flags_t ret_flags;
565 status_t stat;
566
567 if (!paddr)
568 return ERR_INVALID_ARGS;
569
570 DEBUG_ASSERT(x86_get_cr3());
571 current_cr3_val = (map_addr_t)x86_get_cr3();
572
573 stat = x86_mmu_get_mapping(current_cr3_val, vaddr, &ret_level, &ret_flags, &last_valid_entry);
574 if (stat)
575 return stat;
576
577 *paddr = (paddr_t)last_valid_entry;
578
579 /* converting x86 arch specific flags to arch mmu flags */
580 if (flags)
581 *flags = ret_flags;
582
583 return NO_ERROR;
584}
585
586int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
587{
588 uint32_t current_cr3_val;
589 struct map_range range;
590
591 if ((!IS_ALIGNED(paddr, PAGE_SIZE)) || (!IS_ALIGNED(vaddr, PAGE_SIZE)))
592 return ERR_INVALID_ARGS;
593
594 if (count == 0)
595 return NO_ERROR;
596
597 DEBUG_ASSERT(x86_get_cr3());
598 current_cr3_val = (map_addr_t)x86_get_cr3();
599
600 range.start_vaddr = vaddr;
601 range.start_paddr = (map_addr_t)paddr;
602 range.size = count * PAGE_SIZE;
603
604 return (x86_mmu_map_range(current_cr3_val, &range, flags));
605}
606
/**
 * @brief x86 MMU basic initialization
 *
 * Enables the CR0.WP write-protect bit; in PAE builds additionally enables
 * SMEP/SMAP in CR4 when the CPU reports them, and the NXE bit in EFER so the
 * NX page-table bit takes effect.
 */
void arch_mmu_init(void)
{
    volatile uint32_t cr0;

    /* Set WP bit in CR0*/
    cr0 = x86_get_cr0();
    cr0 |= X86_CR0_WP;
    x86_set_cr0(cr0);

#ifdef PAE_MODE_ENABLED
    volatile uint32_t efer_msr, cr4;

    /* Setting the SMEP & SMAP bit in CR4, only when the CPU supports them */
    cr4 = x86_get_cr4();
    if (check_smep_avail())
        cr4 |= X86_CR4_SMEP;
    if (check_smap_avail())
        cr4 |=X86_CR4_SMAP;
    x86_set_cr4(cr4);

    /* Set NXE bit in MSR_EFER*/
    efer_msr = read_msr(x86_MSR_EFER);
    efer_msr |= x86_EFER_NXE;
    write_msr(x86_MSR_EFER, efer_msr);
#endif
}