// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include "../../vfp/vfpinstr.h"

/* Allow the assembler to accept virtualization-extension instructions. */
__asm__(".arch_extension virt");

/*
 * Activate the traps, saving the host's fpexc register before
 * overwriting it. We'll restore it on VM exit.
 */
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * We are about to set HCPTR.TCP10/11 to trap all floating point
	 * register accesses to HYP, however, the ARM ARM clearly states that
	 * traps are only taken to HYP if the operation would not otherwise
	 * trap to SVC. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
	 */
	val = fmrx(FPEXC);
	*fpexc_host = val;	/* stash host FPEXC; restored on VM exit */
	if (!(val & FPEXC_EN)) {
		fmxr(FPEXC, val | FPEXC_EN);
		/* Make FPEXC.EN visible before the HCPTR write below */
		isb();
	}

	/* Install the guest's configuration of the hyp trap controls */
	write_sysreg(vcpu->arch.hcr, HCR);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(HSTR_T(15), HSTR);
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	val = read_sysreg(HDCR);
	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
	write_sysreg(val, HDCR);
}

/*
 * Undo __activate_traps(): disable the hyp traps installed for the guest
 * so the host runs untrapped again.
 */
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
	 * but the crucial bit is the zeroing of HCR.VA in the
	 * pseudocode.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	/* Only clear the HDCR bits we set; leave the rest of the host config */
	val = read_sysreg(HDCR);
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}

/*
 * Point stage-2 translation at this guest (VTTBR) and expose the vcpu's
 * MIDR value via VPIDR. The kvm pointer must be converted to its HYP
 * alias, since we run with the HYP mappings here.
 */
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
	write_sysreg(vcpu->arch.midr, VPIDR);
}

/* Disable stage-2 translation and restore the physical MIDR into VPIDR. */
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, VTTBR);
	write_sysreg(read_sysreg(MIDR), VPIDR);
}


/*
 * Save the guest's GIC CPU interface state on exit. Only the GICv3
 * cpuif needs hyp-side handling here; save the state before dropping
 * the v3 traps.
 */
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/*
 * Restore the guest's GIC CPU interface state on entry. Mirror image of
 * __vgic_save_state(): activate the v3 traps first, then restore state.
 */
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

/*
 * Snapshot the fault syndrome registers into vcpu->arch.fault so the
 * (non-hyp) exit handler can decode the fault later.
 *
 * Returns true if the fault information was captured (or was not needed
 * for this exception class), false if the stage-1 translation of the
 * faulting address failed and the guest should simply be re-entered to
 * take the fault again.
 */
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	/* Only instruction/data aborts carry a fault address to capture */
	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
		u64 par, tmp;

		/*
		 * HPFAR is UNKNOWN here: resolve the IPA by hand with an
		 * AT S1CPR address translation, preserving the host's PAR
		 * around the operation.
		 */
		par = read_sysreg(PAR);
		write_sysreg(far, ATS1CPR);
		isb();			/* AT result must land in PAR before the read */

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);	/* put the saved PAR back */

		if (unlikely(tmp & 1))	/* PAR.F set: translation aborted */
			return false; /* Translation failed, back to guest */

		/* Extract the PA (bits [39:12]) and shift into HPFAR format */
		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}

/*
 * The non-VHE world switch: save host state, install guest state, run
 * the guest until it exits, then restore the host. Runs in HYP mode.
 *
 * Returns the exit code produced by __guest_enter() (ARM_EXCEPTION_*).
 */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

	/* Switch to the HYP alias of the vcpu and publish it in HTPIDR */
	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	/* Save the host's sysreg and banked register state */
	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	__activate_traps(vcpu, &fpexc);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/* Load the guest's register state last, just before entry */
	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * If we took a trap but could not resolve the faulting IPA
	 * (stage-1 translation aborted), re-enter the guest so it takes
	 * the fault itself.
	 */
	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	/* Sample before dropping the HCPTR traps in __deactivate_traps() */
	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_disable_traps(vcpu);

	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

	/* Guest touched the FP/VFP regs: swap its state out, host's back in */
	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

	/* Restore the host FPEXC saved in __activate_traps() */
	fmxr(FPEXC, fpexc);

	return exit_code;
}

/* Panic format strings, indexed by the ARM_EXCEPTION_* cause code. */
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
};

/*
 * Fatal exception taken in HYP mode: tear down any active guest context
 * so the host is usable again, then hand off to __hyp_do_panic() with a
 * cause-specific format string. Never returns.
 */
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);
	u32 val;

	/* Data aborts report the faulting address; others report CPSR */
	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	/* Non-zero VTTBR means we panicked with a guest context loaded */
	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		/* Recover the running vcpu stashed in HTPIDR on entry */
		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__banked_restore_state(host_ctxt);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}