/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

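/*
 * Defaults for VMX Pause-Loop Exiting and SVM Pause Filtering; the
 * window values are adjusted at runtime by the grow/shrink helpers below.
 */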
#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

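/*
 * Grow the PLE window: multiply by the modifier when it is smaller than
 * the base window, otherwise add it.  The u64 intermediate keeps
 * "val * modifier" from overflowing before the result is clamped to @max.
 */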
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

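/*
 * Shrink the PLE window: divide by the modifier when it is smaller than
 * the base window, otherwise subtract it, never going below @min.
 */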
static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

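/*
 * Power-on value of IA32_PAT per the SDM: entries 0-3 are WB, WT, UC-
 * and UC (one byte per entry, low byte first), mirrored in entries 4-7.
 */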
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

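/*
 * #BP and #OF are raised by instructions the guest executed itself
 * (INT3 and INTO), so they are reinjected as "soft" events carrying an
 * instruction length rather than as hardware exceptions.
 */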
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

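/*
 * Per the SDM, only vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF)
 * and 17 (#AC) push an error code onto the stack.
 */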
static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

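/*
 * A virtual address is canonical when bits [63:vaddr_bits-1] are all
 * copies of bit (vaddr_bits - 1).  The arithmetic shift pair below
 * sign-extends the address so it can be compared against the original.
 */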
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
		struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}

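/*
 * Cache the GVA->GFN translation of the last MMIO access so that a
 * subsequent access to the same page can skip the page-table walk.
 * The low bit of the memslots generation is set while a memslots
 * update is in progress, in which case nothing is cached.
 */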
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
		gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & 1))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

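/*
 * Outside of 64-bit mode the architecturally visible GPRs are at most
 * 32 bits wide, so register reads and writes are truncated to u32.
 */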
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
		enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
		enum kvm_reg reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
		int page_num);
bool kvm_vector_hashing_enabled(void);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
		int emulation_type, void *insn, int insn_len);

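/*
 * XSAVE feature bits KVM is able to expose to guests: x87/SSE always,
 * plus AVX (YMM), MPX (BNDREGS/BNDCSR), AVX-512 and PKRU when the host
 * supports them.
 */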
#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern bool enable_vmware_backdoor;

extern struct static_key kvm_no_apic_vcpu;

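/*
 * Convert a nanosecond interval to guest TSC cycles using the
 * precomputed pvclock-style mult/shift pair for the vCPU's TSC rate.
 */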
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/*
 * Same "calling convention" as do_div:
 *  - divide (n << 32) by base
 *  - put result in n
 *  - return remainder
 * Note that "divl" raises #DE if base is zero or the quotient does not
 * fit in 32 bits, so callers must ensure n < base.
 */
#define do_shl32_div32(n, base)					\
	({							\
	u32 __quot, __rem;					\
	asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
		: "rm" (base), "0" (0), "1" ((u32) n));	\
	n = __quot;						\
	__rem;						\
	 })

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

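/*
 * Track which vCPU, if any, is running on this physical CPU so that
 * interrupt handlers (e.g. the perf/PMI callbacks) can tell whether an
 * interrupt arrived while a guest was executing.
 */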
DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

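/*
 * Each byte of the PAT MSR encodes one memory type in its low three
 * bits.  The first check rejects any byte above 7; the second rejects
 * the reserved types 2 and 3 (bit 1 set but bit 2 clear).
 */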
static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);

#endif