/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
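/*
 * A rough sketch of how these fit together, modelled on a VFIO-style
 * caller (illustrative only; 'ua', 'entries', 'size' and 'pageshift'
 * are the caller's values, not defined here):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_new(mm, ua, entries, &mem);		// preregister memory
 *	mem = mm_iommu_lookup(mm, ua, size);		// find region for a map
 *	if (!mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa))
 *		mm_iommu_mapped_inc(mem);		// hold while mapped
 *	...
 *	mm_iommu_mapped_dec(mem);			// release on unmap
 *	mm_iommu_put(mm, mem);				// drop preregistration
 */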
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
		struct mm_struct *next);
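/*
 * Book3S-64 picks the context-switch mechanism at runtime: radix has its
 * own MMU context switch, while hash instead reloads the SLB for the
 * incoming mm.
 */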
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

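/*
 * One context id covers MAX_EA_BITS_PER_CONTEXT of effective address
 * space; addresses beyond that get extra ids, stored in
 * mm->context.extended_id[] and indexed by the high EA bits.
 */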
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
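/*
 * A typical caller pattern (a sketch, modelled on the fault paths; the
 * error handling is the caller's choice):
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;
 *	}
 */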

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-book3s_64 platforms should never see this called */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count so
	 * that TLB invalidations are forced to be global and thus
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
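/*
 * The two must stay balanced; a coprocessor driver would typically pair
 * them around the lifetime of its attachment (illustrative sketch):
 *
 *	mm_context_add_copro(mm);	// when the copro attaches to the mm
 *	...				// copro issues translations
 *	mm_context_remove_copro(mm);	// at detach; flushes on radix
 */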
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif


extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
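/*
 * switch_mm() is just the irq-save wrapper; callers that already run
 * with interrupts off (such as the scheduler's context switch) use
 * switch_mm_irqs_off() directly.
 */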
#define switch_mm_irqs_off switch_mm_irqs_off


#define deactivate_mm(tsk, mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

extern void arch_exit_mmap(struct mm_struct *mm);

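/*
 * If the VDSO gets unmapped, forget its base address so the kernel
 * doesn't later point signal handling at a stale mapping.
 */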
static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

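/* With pkeys disabled, these compile to nothing so callers stay #ifdef-free. */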
#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */