/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:
 */

static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}
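
/*
 * Equivalently, spelled out with raw CPUID (illustrative sketch, not part
 * of the original header): leaf 1 reports VMX support in ECX bit 5.
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(1, &eax, &ebx, &ecx, &edx);
 *	if (ecx & (1 << 5))
 *		...	VMX ("VT-x") is supported
 */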

/**
 * cpu_vmxoff() - Disable VMX on the current CPU
 *
 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
 *
 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
 * atomically track post-VMXON state, e.g. this may be called in NMI context.
 * Eat all faults, as all other faults on VMXOFF are mode related, i.e. faults
 * are guaranteed to be due to the !post-VMXON check unless the CPU is
 * magically in RM, VM86, compat mode, or at CPL>0.
 */
static inline void cpu_vmxoff(void)
{
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault]) :::: fault);
fault:
	cr4_clear_bits(X86_CR4_VMXE);
}
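
/*
 * The asm-goto + _ASM_EXTABLE pairing above is a general fault-eating
 * pattern.  As an illustrative sketch (the helper below is hypothetical,
 * not defined in this header), a WRMSR that tolerates faults the same way:
 *
 *	static inline int wrmsrl_tolerant(u32 msr, u64 val)
 *	{
 *		asm_volatile_goto("1: wrmsr\n\t"
 *				  _ASM_EXTABLE(1b, %l[fault])
 *				  : : "c" (msr), "a" ((u32)val),
 *				      "d" ((u32)(val >> 32))
 *				  : : fault);
 *		return 0;
 *	fault:
 *		return -EIO;
 *	}
 */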

static inline int cpu_vmx_enabled(void)
{
	return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx())
		__cpu_emergency_vmxoff();
}
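
/*
 * Usage sketch (illustrative; the callback name is hypothetical, though
 * the kernel's kdump/reboot paths use these helpers similarly).  Emergency
 * teardown can run in NMI context, hence the silent state re-checks above
 * rather than any assertion:
 *
 *	static void my_shootdown_callback(int cpu, struct pt_regs *regs)
 *	{
 *		cpu_emergency_vmxoff();
 *	}
 */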


/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero.  Simply pass NULL if you are not
 * interested in the message; gcc should take care of not generating
 * code for the message in that case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}
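
/*
 * Usage sketch (illustrative; the error message is made up): 'msg' gives
 * a human-readable reason when the check fails.
 *
 *	const char *msg;
 *
 *	if (!cpu_has_svm(&msg))
 *		pr_err("SVM unavailable: %s\n", msg);
 */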

/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

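	/* Invalidate the host state-save area before clearing EFER.SVME. */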
	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
		 * aren't blocked, e.g. if a fatal error occurred between CLGI
		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
		 * context between reading EFER and executing STGI.  In that
		 * case, GIF must already be set, otherwise the NMI would have
		 * been blocked, so just eat the fault.
		 */
		asm_volatile_goto("1: stgi\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  ::: "memory" : fault);
fault:
		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
	}
}

/** Makes sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}
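
/*
 * Usage sketch (illustrative): an emergency path that must work on any
 * vendor can simply call both; each helper re-checks support and current
 * state before touching hardware.
 *
 *	cpu_emergency_vmxoff();
 *	cpu_emergency_svm_disable();
 */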

#endif /* _ASM_X86_VIRTEX_H */