/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
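
/*
 * Illustrative example of the 64-bit convention above (the prototype is
 * hypothetical, chosen only for this sketch): a call such as
 *
 *	long copy_bytes(void *dst, const void *src, long len);
 *
 * arrives with dst in %rdi, src in %rsi and len in %rdx, returns its value
 * in %rax, and may freely clobber %rcx, %r8-%r11 (plus %rax/%rdx), while
 * %rbx, %rbp and %r12-%r15 must be preserved if used.
 */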

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
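
/*
 * Illustrative addressing pattern (a sketch, not copied from the entry code):
 * once a full pt_regs frame has been built and %rsp points at its base, the
 * offsets above address individual saved registers, e.g.
 *
 *	movq	ORIG_RAX(%rsp), %rdi
 *	movq	$-1, ORIG_RAX(%rsp)
 *
 * which would fetch the saved syscall number / error code and then overwrite
 * the slot. The real consumers live in the entry code.
 */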

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif

	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx, %edx	/* nospec dx */
	xorl	%ecx, %ecx	/* nospec cx */
	xorl	%r8d, %r8d	/* nospec r8 */
	xorl	%r9d, %r9d	/* nospec r9 */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx, %ebx	/* nospec rbx */
	xorl	%ebp, %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro POP_REGS pop_rdi=1
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	.if \pop_rdi
	popq	%rdi
	.endif
.endm
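
/*
 * Sketch of the intended pairing (illustrative only; the real users live in
 * the entry code):
 *
 *	PUSH_AND_CLEAR_REGS
 *	(call into C with a full pt_regs frame on the stack)
 *	POP_REGS
 *
 * A helper that is itself reached via "call" and needs the return address to
 * stay on top of the stack would use PUSH_AND_CLEAR_REGS save_ret=1 instead.
 */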

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK	(PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
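
/*
 * Worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12, and an 8k-aligned
 * PGD pair): if the kernel half sits at physical address 0x1000000, the user
 * half sits at 0x1001000, so the two CR3 values differ only in bit 12 - plus,
 * with PCID enabled, in the user-PCID bit selected above.
 */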

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
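
/*
 * Illustrative use (a sketch, not copied from the entry code; the register
 * choice is arbitrary): an entry path with a free caller-clobbered register
 * can switch to the kernel page tables with e.g.
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *
 * When X86_FEATURE_PTI is clear, the leading ALTERNATIVE jumps straight to
 * .Lend_\@, so callers need no feature check of their own.
 */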

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
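
/*
 * Sketch of the exit-side usage (illustrative only, register choice is just
 * an example): a path that still has its kernel stack available can simply do
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 *
 * while a path that must not touch the stack anymore supplies both scratch
 * registers itself via the _NOSTACK variant.
 */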

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
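
/*
 * Illustrative pairing for the "paranoid" style paths (a sketch; %r14 is
 * chosen here purely for the example): the entry side stashes the old CR3 in
 * a callee-saved register and the exit side restores exactly that value,
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 *
 * so a nested exception that arrived with the kernel CR3 already loaded does
 * not get switched to user page tables on the way out.
 */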

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
.endm

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
.endm
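
/*
 * Sketch of how the save_reg variant is meant to be paired (illustrative;
 * the register choice is arbitrary): a paranoid path that may interrupt
 * kernel code with IBRS already set preserves the old MSR value across the
 * handler,
 *
 *	IBRS_ENTER save_reg=%r15
 *	...
 *	IBRS_EXIT save_reg=%r15
 *
 * whereas ordinary user->kernel transitions use the plain forms and let
 * IBRS_EXIT restore x86_spec_ctrl_current with SPEC_CTRL_IBRS cleared.
 */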

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
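
/*
 * Illustrative shape of a conditional-swapgs entry path (a sketch only; the
 * labels are hypothetical and the real logic lives in the entry code):
 *
 *	testb	$3, CS(%rsp)
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY
 *	jmp	.Lgs_ok
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY
 * .Lgs_ok:
 *
 * The fences keep the CPU from speculating past the CS check with the wrong
 * GS base in place.
 */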

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

#endif /* CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef CONFIG_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call	enter_from_user_mode
.Lafter_call_\@:
#endif
.endm

#ifdef CONFIG_PARAVIRT_XXL
#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
#else
#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
#endif
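
/*
 * Illustrative use of GET_CR2_INTO (a sketch; the register choice is only an
 * example): early fault handling code that wants the faulting address can do
 *
 *	GET_CR2_INTO(%r12)
 *
 * which expands to a direct "mov %cr2, %r12" or, under CONFIG_PARAVIRT_XXL,
 * to the paravirt CR2 read via %rax followed by a move into %r12.
 */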