/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
17
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	/* This code must live in the idmap section: it runs while the
	 * EL2 MMU is being turned on, so it has to be identity-mapped. */
	.pushsection	.hyp.idmap.text, "ax"

	/* 2^11 = 2KB alignment, as required for a vector table base
	 * written to VBAR_EL2. */
	.align	11

/*
 * Temporary EL2 vector table used while initialising the hypervisor.
 * The only expected entry is a synchronous exception (HVC) from
 * 64-bit EL1; every other vector parks the CPU in __invalid.
 */
ENTRY(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	/* Unexpected exception during init: spin forever. */
	b	.

	/*
	 * Entered via HVC from the host kernel at EL1 with:
	 * x0: HYP pgd (or a stub hypercall number, tested first below)
	 * x1: HYP stack
	 * x2: HYP vectors
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	/* x0 is a real pgd: install it as the EL2 translation base. */
	msr	ttbr0_el2, x0

	/*
	 * Build TCR_EL2 from TCR_EL1: keep only the fields valid at EL2
	 * and set the EL2 RES1 bits.
	 */
	mrs	x4, tcr_el1
	ldr	x5, =TCR_EL2_MASK
	and	x4, x4, x5
	mov	x5, #TCR_EL2_RES1
	orr	x4, x4, x5

#ifndef CONFIG_ARM64_VA_BITS_48
	/*
	 * If we are running with VA_BITS < 48, we may be running with an extra
	 * level of translation in the ID map. This is only the case if system
	 * RAM is out of range for the currently configured page size and number
	 * of translation levels, in which case we will also need the extra
	 * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need the extra level in *both* our translation
	 * tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	ldr_l	x5, idmap_t0sz
	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
	 * TCR_EL2.
	 */
	mrs	x5, ID_AA64MMFR0_EL1
	bfi	x4, x5, #16, #3		// PS = TCR_EL2[18:16] <- PARange = MMFR0[2:0]

	msr	tcr_el2, x4

	/* Reuse the host's EL1 memory attribute encodings at EL2. */
	mrs	x4, mair_el1
	msr	mair_el2, x4
	isb				// sysreg writes visible before MMU enable

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	dsb	sy

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
	msr	sctlr_el2, x4		// turns on the EL2 MMU
	isb

	/* Set the stack and new vectors */
	kern_hyp_va	x1
	mov	sp, x1
	kern_hyp_va	x2
	msr	vbar_el2, x2

	/* copy tpidr_el1 into tpidr_el2 for use by HYP */
	mrs	x1, tpidr_el1
	msr	tpidr_el2, x1

	/* Hello, World!  Return to the caller, now running under KVM. */
	eret
ENDPROC(__kvm_hyp_init)
132
/*
 * Handle a stub hypercall (x0 < HVC_STUB_HCALL_NR), called from
 * __do_hyp_init above or via the installed stub vectors.
 *
 * HVC_SOFT_RESTART:  x1 = EL2 entry point, x2-x4 = its arguments.
 * HVC_RESET_VECTORS: tear KVM down and reinstall the hyp stub vectors.
 * Anything else:     return HVC_STUB_ERR in x0.
 */
ENTRY(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	/* Return to EL2h with all DAIF exceptions masked. */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub: disable the EL2 MMU first. */
	mrs	x5, sctlr_el2
	ldr	x6, =SCTLR_ELx_FLAGS
	bic	x5, x5, x6		// Clear SCTL_M and etc
	pre_disable_mmu_workaround	// erratum workaround; must precede the write
	msr	sctlr_el2, x5
	isb

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	ldr	x0, =HVC_STUB_ERR
	eret

ENDPROC(__kvm_handle_stub_hvc)
176
	/* Emit the literal pool for the ldr =const uses above, keeping it
	 * inside the idmap section before we close it. */
	.ltorg

	.popsection