/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

	.arch_extension	virt

	.text
	.pushsection	.hyp.text, "ax"

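/*
 * The world-switch code stashes the current vcpu pointer in HTPIDR
 * (the Hyp Software Thread ID register), so any handler below can
 * recover it with a single mrc.
 */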
.macro load_vcpu reg
	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
.endm

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1, and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
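
/*
 * Illustrative sketch of the host-side calling sequence described
 * above (not actual kernel code; the function name is hypothetical,
 * the register use follows the ABI text):
 *
 *	ldr	r0, =some_hyp_function	@ pointer to a HYP function
 *	mov	r1, #arg0		@ becomes r0 in the HYP function
 *	mov	r2, #arg1		@ becomes r1
 *	mov	r3, #arg2		@ becomes r2
 *	hvc	#0			@ traps to hyp_hvc below
 */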

	.align 5
__kvm_hyp_vector:
	.global __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	.align 5
__kvm_hyp_vector_ic_inv:
	.global __kvm_hyp_vector_ic_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, and we must guarantee that SP is 8-byte aligned.
	 */
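	/*
	 * Worked example: an HVC trap enters this page at vector
	 * offset 0x14, i.e. at the "HVC" add below, then falls
	 * through the IRQ add and the FIQ nop. SP is therefore
	 * incremented twice, encoding exception number 2, which
	 * decode_vectors turns into a branch to hyp_hvc.
	 */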
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
	isb

	b	decode_vectors

	.align 5
__kvm_hyp_vector_bp_inv:
	.global __kvm_hyp_vector_bp_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, and we must guarantee that SP is 8-byte aligned
	 * (same trick as __kvm_hyp_vector_ic_inv above).
	 */
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
	isb

decode_vectors:

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * Yet another silly hack: Use VPIDR as a temp register.
	 * Thumb2 is really a pain, as SP cannot be used with most
	 * of the bitwise instructions. The vect_br macro ensures
	 * things get cleaned up.
	 */
	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR: stash r0 */
	mov	r0, sp
	and	r0, r0, #7		/* r0 = encoded exception number */
	sub	sp, sp, r0		/* restore the original SP */
	push	{r1, r2}
	mov	r1, r0			/* keep the number in r1 */
	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR: recover r0 */
	mrc	p15, 0, r2, c0, c0, 0	/* MIDR */
	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR: reset to MIDR default */
#endif

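/*
 * vect_br: branch to \targ if the exception number encoded in SP
 * (ARM) or r1 (Thumb2) equals \val. The ARM flavour XORs \val into
 * SP's low bits: if they matched they are now all zero and we take
 * the branch; otherwise the conditional eor puts them back.
 */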
.macro vect_br val, targ
ARM(	eor	sp, sp, #\val	)
ARM(	tst	sp, #7		)
ARM(	eorne	sp, sp, #\val	)

THUMB(	cmp	r1, #\val	)
THUMB(	popeq	{r1, r2}	)

	beq	\targ
.endm

	vect_br	0, hyp_fiq
	vect_br	1, hyp_irq
	vect_br	2, hyp_hvc
	vect_br	3, hyp_dabt
	vect_br	4, hyp_pabt
	vect_br	5, hyp_svc
	vect_br	6, hyp_undef
	vect_br	7, hyp_reset
#endif

.macro invalid_vector label, cause
	.align
\label:	mov	r0, #\cause
	b	__hyp_panic
.endm

	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ

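/*
 * Force a panic back in the host kernel: build an SVC-mode PSR,
 * point ELR_hyp at panic() and eret. The lr loaded with
 * __kvm_call_hyp is, as far as we can tell, there so the resulting
 * backtrace points at the hypercall entry point; r0 still holds
 * whatever first argument the caller set up for panic().
 */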
ENTRY(__hyp_do_panic)
	mrs	lr, cpsr
	bic	lr, lr, #MODE_MASK
	orr	lr, lr, #SVC_MODE
THUMB(	orr	lr, lr, #PSR_T_BIT	)
	msr	spsr_cxsf, lr
	ldr	lr, =panic
	msr	ELR_hyp, lr
	ldr	lr, =__kvm_call_hyp
	clrex
	eret
ENDPROC(__hyp_do_panic)

hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest,
	 * or from executing HVC from the host kernel, which means
	 * "do something in Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow a simple
	 * switch to Hyp mode. The VMID lives in VTTBR[55:48], i.e. in
	 * bits [23:16] of the upper word of the 64-bit VTTBR.
	 */
	mrrc	p15, 6, r0, r2, c2	@ VTTBR -> r0 (low), r2 (high)
	lsr	r2, r2, #16
	and	r2, r2, #0xff		@ r2 = VMID
	cmp	r2, #0
	bne	guest_hvc_trap		@ Guest called HVC

	/*
	 * Getting here means the host called HVC, so we shift the
	 * parameters down and branch to the Hyp function.
	 */
	pop	{r0, r1, r2}

	/*
	 * Check if we have a kernel function, which is guaranteed to be
	 * bigger than the maximum hyp stub hypercall number.
	 */
	cmp	r0, #HVC_STUB_HCALL_NR
	bhs	1f

	/*
	 * Not a kernel function, treat it as a stub hypercall.
	 * Compute the physical address for __kvm_handle_stub_hvc
	 * (as the code lives in the idmapped page) and branch there.
	 * We hijack ip (r12) as a tmp register.
	 */
	push	{r1}
	ldr	r1, =kimage_voffset
	ldr	r1, [r1]
	ldr	ip, =__kvm_handle_stub_hvc
	sub	ip, ip, r1		@ virtual -> physical
	pop	{r1}

	bx	ip

1:
	/*
	 * Pushing r2 here is just a way of keeping the stack aligned to
	 * 8 bytes on any path that can trigger a HYP exception. Here,
	 * we may well be about to jump into the guest, and the guest
	 * exit would otherwise be badly decoded by our fancy
	 * "decode-exception-without-a-branch" code...
	 */
	push	{r2, lr}

	mov	lr, r0			@ HYP function pointer
	mov	r0, r1			@ shift the arguments down...
	mov	r1, r2
	mov	r2, r3			@ ...so they land in r0-r2

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{r2, lr}
	eret

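/*
 * Fast path for a guest issuing the SMCCC ARCH_WORKAROUND_1 call.
 * With CONFIG_HARDEN_BRANCH_PREDICTOR, the BTB/icache invalidation
 * already happened on exception entry (see the hardened vectors
 * above), so all that remains is to return 0, i.e. SMCCC success.
 */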
guest_hvc_trap:
	movw	r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
	movt	r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
	ldr	r0, [sp]		@ Guest's r0
	teq	r0, r2
	bne	guest_trap
	add	sp, sp, #12		@ drop the saved {r0, r1, r2}
	@ Returns:
	@ r0 = 0
	@ r1 = HSR value (perfectly predictable)
	@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
	mov	r0, #0
	eret

guest_trap:
	load_vcpu r0			@ Load VCPU pointer to r0

#ifdef CONFIG_VFPv3
	@ Check for a VFP access
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_CP_0_13
	beq	__vfp_guest_restore
#endif

	mov	r1, #ARM_EXCEPTION_HVC
	b	__guest_exit

hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu r0			@ Load VCPU pointer to r0
	b	__guest_exit

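/*
 * A data abort taken in Hyp mode is only expected at the
 * abort_guest_exit_start/abort_guest_exit_end probe points in the
 * guest-exit path, where a pending guest abort is deliberately
 * drained (the Thumb "+1" accounts for the Thumb bit in symbol
 * addresses). Anything else is a hypervisor bug and we panic;
 * otherwise tag the exit code in r0 with ARM_EXIT_WITH_ABORT_BIT.
 */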
hyp_dabt:
	push	{r0, r1}
	mrs	r0, ELR_hyp
	ldr	r1, =abort_guest_exit_start
THUMB(	add	r1, r1, #1)
	cmp	r0, r1
	ldrne	r1, =abort_guest_exit_end
THUMB(	addne	r1, r1, #1)
	cmpne	r0, r1
	pop	{r0, r1}
	bne	__hyp_panic

	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
	eret

	.ltorg

	.popsection