/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
| 5 | #ifndef _ASM_POWERPC_SWITCH_TO_H |
| 6 | #define _ASM_POWERPC_SWITCH_TO_H |
| 7 | |
| 8 | #include <asm/reg.h> |
| 9 | |
/* Forward declarations: only pointers to these are used in this header. */
struct thread_struct;
struct task_struct;
struct pt_regs;

/*
 * Low-level context switch.  __switch_to() returns the task that was
 * previously running; the switch_to() macro stores that into 'last' so
 * the scheduler knows which task it switched away from.
 */
extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

/*
 * Register-level switch between two thread_structs (NOTE(review):
 * presumably the asm entry point invoked by __switch_to() — confirm
 * against arch/powerpc/kernel).
 */
extern struct task_struct *_switch(struct thread_struct *prev,
		struct thread_struct *next);

/* Install a new set of BookE debug register values. */
extern void switch_booke_debug_regs(struct debug_reg *new_debug);

/* Emulate an AltiVec instruction from a trapped context; returns int status. */
extern int emulate_altivec(struct pt_regs *);

/* Flush or give up every FP/vector/SPE register state for a task at once. */
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);
| 27 | |
#ifdef CONFIG_PPC_FPU
/* Begin a section where kernel code may use the FPU. */
extern void enable_kernel_fp(void);
/* Push a task's live FP register state out to its thread_struct. */
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
/* End a kernel-FP section: drop MSR_FP via msr_check_and_clear(). */
static inline void disable_kernel_fp(void)
{
	msr_check_and_clear(MSR_FP);
}
#else
/* No FPU configured: empty stubs so common callers need no #ifdefs. */
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif
| 41 | |
#ifdef CONFIG_ALTIVEC
/* Begin a section where kernel code may use AltiVec/VMX. */
extern void enable_kernel_altivec(void);
/* Push a task's live AltiVec register state out to its thread_struct. */
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
/* End a kernel-AltiVec section: drop MSR_VEC via msr_check_and_clear(). */
static inline void disable_kernel_altivec(void)
{
	msr_check_and_clear(MSR_VEC);
}
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
/*
 * Mirror the CONFIG_PPC_FPU section above, which stubs
 * flush_fp_to_thread(): provide an empty flush_altivec_to_thread()
 * so callers don't have to wrap it in #ifdef CONFIG_ALTIVEC.
 */
static inline void flush_altivec_to_thread(struct task_struct *t) { }
#endif
| 55 | |
#ifdef CONFIG_VSX
/* Begin a section where kernel code may use VSX. */
extern void enable_kernel_vsx(void);
/* Push a task's live VSX register state out to its thread_struct. */
extern void flush_vsx_to_thread(struct task_struct *);
/*
 * End a kernel-VSX section.  Clears FP and VEC along with VSX —
 * VSX overlays the FP and VMX register files, so all three MSR
 * facility bits are dropped together.
 */
static inline void disable_kernel_vsx(void)
{
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
/* NOTE(review): no !CONFIG_VSX stubs exist; callers must #ifdef. */
#endif
| 64 | |
#ifdef CONFIG_SPE
/* Begin a section where kernel code may use SPE. */
extern void enable_kernel_spe(void);
/* Push a task's live SPE register state out to its thread_struct. */
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);
/* End a kernel-SPE section: drop MSR_SPE via msr_check_and_clear(). */
static inline void disable_kernel_spe(void)
{
	msr_check_and_clear(MSR_SPE);
}
#else
/* No SPE configured: empty stub so common callers need no #ifdefs. */
static inline void __giveup_spe(struct task_struct *t) { }
#endif
| 77 | |
/*
 * Reset all Event-Based Branching (EBB) and related perf-monitor state
 * in a task's thread_struct.  No-op on non-Book3S-64 configurations.
 */
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct thread_struct *th = &t->thread;

	/* EBB perf events are not inherited, so clear all EBB state. */
	th->ebbrr = 0;
	th->ebbhr = 0;
	th->bescr = 0;
	th->mmcr2 = 0;
	th->mmcr0 = 0;
	th->siar = 0;
	th->sdar = 0;
	th->sier = 0;
	th->used_ebb = 0;
#endif
}
| 93 | |
/* Mark the current thread as a VAS user; returns int status. */
extern int set_thread_uses_vas(void);

/*
 * Assign a TIDR (thread ID register) value for the given task;
 * returns int status (NOTE(review): presumably 0 / -errno — confirm
 * against the definition in process.c).
 */
extern int set_thread_tidr(struct task_struct *t);
| 97 | |
| 98 | #endif /* _ASM_POWERPC_SWITCH_TO_H */ |