/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>

/*
 * arm64 compatibility macros: map the arm64 PSR_AA32_* names onto the
 * native AArch32 PSR definitions so that code shared with arm64 compiles
 * unchanged here. GE/DIT/PAN/SSBS are open-coded because the 32-bit
 * headers provide no named constants for them; the values follow the
 * AArch32 PSTATE layout (GE[3:0] at bits 19:16, DIT at bit 21, PAN at
 * bit 22, SSBS at bit 23).
 */
#define PSR_AA32_MODE_FIQ	FIQ_MODE
#define PSR_AA32_MODE_SVC	SVC_MODE
#define PSR_AA32_MODE_ABT	ABT_MODE
#define PSR_AA32_MODE_UND	UND_MODE
#define PSR_AA32_T_BIT		PSR_T_BIT
#define PSR_AA32_F_BIT		PSR_F_BIT
#define PSR_AA32_I_BIT		PSR_I_BIT
#define PSR_AA32_A_BIT		PSR_A_BIT
#define PSR_AA32_E_BIT		PSR_E_BIT
#define PSR_AA32_IT_MASK	PSR_IT_MASK
#define PSR_AA32_GE_MASK	0x000f0000
#define PSR_AA32_DIT_BIT	0x00200000
#define PSR_AA32_PAN_BIT	0x00400000
#define PSR_AA32_SSBS_BIT	0x00800000
#define PSR_AA32_Q_BIT		PSR_Q_BIT
#define PSR_AA32_V_BIT		PSR_V_BIT
#define PSR_AA32_C_BIT		PSR_C_BIT
#define PSR_AA32_Z_BIT		PSR_Z_BIT
#define PSR_AA32_N_BIT		PSR_N_BIT

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

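/*
 * The guest on a 32-bit host is always in AArch32 state, so the 32-bit
 * view of a general-purpose register is simply the register itself.
 */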
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}

unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);

static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}

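/*
 * On arm64 this converts the host's 64-bit SPSR layout to the AArch32
 * one; on a 32-bit host the two layouts already coincide, so this is an
 * identity transform.
 */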
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	return spsr;
}

static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return *vcpu_reg(vcpu, reg_num);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	*vcpu_reg(vcpu, reg_num) = val;
}

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);

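/*
 * Generic exception-injection and instruction-skip wrappers: with only
 * AArch32 guests to deal with, these forward straight to the 32-bit
 * implementations declared above.
 */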
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}

static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}

static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr |= HCR_TWE;
}

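/* A 32-bit host can only run AArch32 guests, so guest mode is always 32-bit. */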
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return true;
}

static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

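/*
 * AArch32 mode encodings: USR is 0x10, SYS is 0x1f, and every exception
 * mode with a banked SPSR lies strictly between the two. Any mode other
 * than USR is privileged.
 */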
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
	return cpsr_mode > USR_MODE;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}

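/*
 * Returns the condition field of the trapped instruction when HSR.CV
 * says it is valid, or -1 when no condition code was captured.
 */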
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	if (hsr & HSR_CV)
		return (hsr & HSR_COND) >> HSR_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}

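/*
 * HPFAR[31:4] holds bits [39:12] of the faulting IPA; shifting the
 * masked value left by 8 reconstructs the page-aligned address.
 */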
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}

/*
 * Get the access size from a data abort: HSR.SAS (bits [23:22]) encodes
 * byte, halfword or word; 0b11 is reserved in the AArch32 HSR encoding.
 */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	default:
		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
		return -EFAULT;
	}
}

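/*
 * Taken together, the decode helpers above are what an MMIO emulation
 * path uses. A minimal sketch (hypothetical code, not part of this
 * header) of handling a trapped guest store:
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu) && kvm_vcpu_dabt_iswrite(vcpu)) {
 *		int len = kvm_vcpu_dabt_get_as(vcpu);
 *		int rd = kvm_vcpu_dabt_get_rd(vcpu);
 *		unsigned long data =
 *			vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rd),
 *						len);
 *		// hand "data" and "len" to the device model
 *	}
 */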
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

/*
 * True for synchronous external aborts, whether on the access itself,
 * on a translation table walk, or reported as a parity/ECC error.
 */
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}

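/*
 * The Spectre-v4 (ARM_SMCCC_ARCH_WORKAROUND_2 / SSBD) mitigation state is
 * not implemented on 32-bit hosts, so the per-vcpu flag is stubbed out.
 */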
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

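/*
 * MMIO data is kept in host byte order while in flight. The two helpers
 * below byteswap as needed, based on the guest's CPSR.E endianness bit:
 * vcpu_data_guest_to_host() for data the guest is storing, and
 * vcpu_data_host_to_guest() for data the guest is loading.
 */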
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		default:
			return be32_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		default:
			return le32_to_cpu(data);
		}
	}
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		default:
			return cpu_to_be32(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		default:
			return cpu_to_le32(data);
		}
	}
}

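/*
 * Pointer authentication is an ARMv8.3-A feature with no AArch32
 * equivalent; stub it out so shared code compiles.
 */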
static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }

#endif /* __ARM_KVM_EMULATE_H__ */