b.liu | e958203 | 2025-04-17 19:18:16 +0800 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef __PARISC_MMU_CONTEXT_H |
| 3 | #define __PARISC_MMU_CONTEXT_H |
| 4 | |
| 5 | #include <linux/mm.h> |
| 6 | #include <linux/sched.h> |
| 7 | #include <linux/atomic.h> |
| 8 | #include <asm/pgalloc.h> |
| 9 | #include <asm/pgtable.h> |
| 10 | #include <asm-generic/mm_hooks.h> |
| 11 | |
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * Lazy-TLB architecture hook: intentionally a no-op on this
	 * architecture — no per-CPU TLB bookkeeping is required here.
	 */
}
| 15 | |
| 16 | /* on PA-RISC, we actually have enough contexts to justify an allocator |
| 17 | * for them. prumpf */ |
| 18 | |
| 19 | extern unsigned long alloc_sid(void); |
| 20 | extern void free_sid(unsigned long); |
| 21 | |
/*
 * Allocate a space id for a newly created address space.
 * Always returns 0 (success); if alloc_sid() hands back 0 here,
 * activate_mm() below retries the allocation on the exec path.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* A brand-new mm must have exactly one user at this point. */
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}
| 30 | |
| 31 | static inline void |
| 32 | destroy_context(struct mm_struct *mm) |
| 33 | { |
| 34 | free_sid(mm->context); |
| 35 | mm->context = 0; |
| 36 | } |
| 37 | |
/*
 * Derive the protection-id value (loaded into cr8 by load_context())
 * from a space id.  The shift re-aligns the space id bits with the
 * protection id field; the exact layout depends on SPACEID_SHIFT,
 * which is defined elsewhere per configuration.
 * NOTE(review): bit-field placement inferred from the shifts — confirm
 * against the PA-RISC architecture manual.
 */
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context << 1;
#else
	return context >> (SPACEID_SHIFT - 1);
#endif
}
| 46 | |
/*
 * Make @context the live user context: install the space id in a
 * space register and the matching protection id in a control register.
 * NOTE(review): register numbers 3 (space reg) and 8 (PID control reg)
 * are presumed from the mtsp/mtctl mnemonics — verify against the
 * PA-RISC register assignments before changing.
 */
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
	mtctl(__space_to_prot(context), 8);
}
| 52 | |
/*
 * Switch to @next's address space.  Caller must have interrupts
 * disabled (see switch_mm() below, which wraps this in
 * local_irq_save/restore).
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
		/* Point cr25 at the new page directory (physical address),
		 * then load the new space/protection ids. */
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}
| 61 | |
/*
 * Interrupt-safe wrapper around switch_mm_irqs_off(): disables local
 * interrupts around the context-register update.  Switching to the
 * same mm is a no-op.
 */
static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev != next) {
		local_irq_save(flags);
		switch_mm_irqs_off(prev, next, tsk);
		local_irq_restore(flags);
	}
}
/* Self-referential define — presumably signals to generic code (via
 * #ifdef) that this architecture provides switch_mm_irqs_off(). */
#define switch_mm_irqs_off switch_mm_irqs_off

/* Nothing to do when an address space is deactivated. */
#define deactivate_mm(tsk,mm) do { } while (0)
| 77 | |
| 78 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) |
| 79 | { |
| 80 | /* |
| 81 | * Activate_mm is our one chance to allocate a space id |
| 82 | * for a new mm created in the exec path. There's also |
| 83 | * some lazy tlb stuff, which is currently dead code, but |
| 84 | * we only allocate a space id if one hasn't been allocated |
| 85 | * already, so we should be OK. |
| 86 | */ |
| 87 | |
| 88 | BUG_ON(next == &init_mm); /* Should never happen */ |
| 89 | |
| 90 | if (next->context == 0) |
| 91 | next->context = alloc_sid(); |
| 92 | |
| 93 | switch_mm(prev,next,current); |
| 94 | } |
| 95 | #endif |