/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

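/*
 * Report syscall entry to the audit subsystem. The first four syscall
 * arguments live in different registers depending on whether this is a
 * native 64-bit syscall (di, si, dx, r10) or a 32-bit one (bx, cx, dx, si).
 */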
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

        struct thread_info *ti = current_thread_info();
        unsigned long ret = 0;
        bool emulated = false;
        u32 work;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));

        work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

        if (unlikely(work & _TIF_SYSCALL_EMU))
                emulated = true;

        if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
            tracehook_report_syscall_entry(regs))
                return -1L;

        if (emulated)
                return -1L;

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp after ptrace, to catch any tracer changes.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

                ret = __secure_computing(&sd);
                if (ret == -1)
                        return ret;
        }
#endif

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);

        return ret ?: regs->orig_ax;
}

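/*
 * Work flags that must all be clear, with IRQs off, before we can return
 * to user mode; any of them sends us through exit_to_usermode_loop().
 */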
#define EXIT_TO_USERMODE_LOOP_FLAGS                               \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |    \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
         * can be set at any time on preemptible kernels if we have IRQs on,
         * so we need to loop. Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (cached_flags & _TIF_PATCH_PENDING)
                        klp_update_patch_state(current);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                        rseq_handle_notify_resume(NULL, regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

                cached_flags = READ_ONCE(current_thread_info()->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags;

        addr_limit_user_check();

        lockdep_assert_irqs_disabled();
        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT. Make sure we clear it before
         * returning to user mode. We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls. The fixup is exercised by the ptrace_syscall_32
         * selftest.
         *
         * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
         * special case only applies after poking regs and before the
         * very next return to user mode.
         */
        ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

        user_enter_irqoff();

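        /*
         * Flush CPU buffers on the kernel-to-user transition to mitigate
         * MDS; this is a no-op unless the mitigation is enabled.
         */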
        mds_user_clear_cpu_buffers();
}

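/*
 * One-time syscall-exit work (audit, tracing, single-stepping), handled
 * by syscall_slow_exit_work() before the exit-to-usermode loop runs.
 */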
#define SYSCALL_EXIT_WORK_FLAGS                           \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |        \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

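        /*
         * With CONFIG_DEBUG_RSEQ, check that this syscall was not issued
         * from inside an rseq critical section; otherwise this is a no-op.
         */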
        rseq_syscall(regs);

        /*
         * First do one-time work. If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
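/*
 * 64-bit syscall entry point, reached from the SYSCALL entry asm with
 * IRQs off; nr is the syscall number from rax, regs the saved user state.
 */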
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
        struct thread_info *ti;

        enter_from_user_mode();
        local_irq_enable();
        ti = current_thread_info();
        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        /*
         * NB: Native and x32 syscalls are dispatched from the same
         * table. The only functional difference is the x32 bit in
         * regs->orig_ax, which changes the behavior of some syscalls.
         */
        nr &= __SYSCALL_MASK;
        if (likely(nr < NR_syscalls)) {
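                /*
                 * Clamp the table index under speculation so that a
                 * mispredicted bounds check can't be used to read out
                 * of bounds (Spectre v1 hardening).
                 */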
                nr = array_index_nospec(nr, NR_syscalls);
                regs->ax = sys_call_table[nr](regs);
        }

        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        unsigned int nr = (unsigned int)regs->orig_ax;

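        /*
         * Mark this task as being in a compat syscall; cleared again in
         * prepare_exit_to_usermode() after signal handling.
         */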
#ifdef CONFIG_IA32_EMULATION
        ti->status |= TS_COMPAT;
#endif

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
                /*
                 * Subtlety here: if ptrace pokes something larger than
                 * 2^32-1 into orig_ax, this truncates it. This may or
                 * may not be necessary, but it matches the old asm
                 * behavior.
                 */
                nr = syscall_trace_enter(regs);
        }

        if (likely(nr < IA32_NR_syscalls)) {
                nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
                regs->ax = ia32_sys_call_table[nr](regs);
#else
                /*
                 * It's possible that a 32-bit syscall implementation
                 * takes a 64-bit parameter but nonetheless assumes that
                 * the high bits are zero. Make sure we zero-extend all
                 * of the args.
                 */
                regs->ax = ia32_sys_call_table[nr](
                        (unsigned int)regs->bx, (unsigned int)regs->cx,
                        (unsigned int)regs->dx, (unsigned int)regs->si,
                        (unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
        }

        syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_irq_enable();
        do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention. Adjust regs so it looks like we entered using int80.
         */

        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        enter_from_user_mode();

        local_irq_enable();

        /* Fetch EBP from where the vDSO stashed it. */
        if (
#ifdef CONFIG_X86_64
                /*
                 * Micro-optimization: the pointer we're following is
                 * explicitly 32 bits, so it can't be out of range.
                 */
                __get_user(*(u32 *)&regs->bp,
                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
                get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
                ) {

                /* User code screwed up. */
                local_irq_disable();
                regs->ax = -EFAULT;
                prepare_exit_to_usermode(regs);
                return 0;       /* Keep it simple: use IRET. */
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif