/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, then free the page. Because the
 * disabling of IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by
 * some other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail under memory
 * pressure. To guarantee progress we fall back to single table freeing; see
 * the implementation of tlb_remove_table_one().
 */
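
/*
 * Illustrative sketch of the walker side of the contract above; a sketch
 * under stated assumptions, not code from this header (lockless_walk()
 * is a hypothetical helper):
 *
 *	local_irq_disable();		// holds off IPIs and sched-RCU GPs
 *	pte = lockless_walk(mm, addr);	// tables cannot be freed meanwhile
 *	...
 *	local_irq_enable();
 *
 * Both an IPI broadcast and a sched-RCU grace period must wait for the
 * IRQs-off section to end, so the batched directory pages below stay
 * valid for the duration of the walk.
 */
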
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
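
/*
 * Worked example, assuming 4K pages and 64-bit pointers: struct
 * mmu_gather_batch is 16 bytes, so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510
 * page pointers per batch, and MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19
 * batches, i.e. at most about 19 * 510 = 9690 pages freed per flush.
 */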

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
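
/*
 * A minimal sketch of the usual mmu_gather lifecycle, assuming the
 * tlb_gather_mmu()/tlb_finish_mmu() wrappers that mm/memory.c builds on
 * top of the arch_* hooks declared above:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	// initialise the gather
 *	... tlb_remove_page(&tlb, page) ...	// batch pages while zapping
 *	tlb_finish_mmu(&tlb, start, end);	// flush TLBs, then free pages
 */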

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
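
/*
 * Example (assuming 4K pages): starting from a freshly reset non-fullmm
 * gather (start == TASK_SIZE, end == 0), __tlb_adjust_range(tlb, 0x1000,
 * PAGE_SIZE) leaves tlb->start == 0x1000 and tlb->end == 0x2000; later
 * calls only ever grow this window.
 */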

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about the page size change itself; just update the
	 * mmu_gather page size here so that the debug checks don't throw
	 * false warnings.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif
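
/*
 * A sketch of the intended use, assuming a hugetlb-style zap path (the
 * call site below is illustrative, not code from this header): announce
 * the new page size before batching, so the CONFIG_DEBUG_VM size check
 * compares against the right value:
 *
 *	tlb_remove_check_page_size_change(tlb, huge_page_size(h));
 *	tlb_remove_page_size(tlb, page, huge_page_size(h));
 */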

/*
 * For the tlb vma handling below, we can optimise these hooks away when
 * we're doing a full MM flush. When we're doing a munmap, the vmas are
 * adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm)				\
			tlb_flush_mmu_tlbonly(tlb);		\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
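
/*
 * A sketch of the intended call pattern, loosely modelled on a pte zap
 * loop (pseudo-code with assumed helpers, not the exact mm/memory.c code):
 *
 *	tlb_start_vma(tlb, vma);
 *	for each pte in [start, end):
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *		tlb_remove_tlb_entry(tlb, pte, addr);	// record the range
 *		if (__tlb_remove_page(tlb, page))	// batch full?
 *			tlb_flush_mmu(tlb);		// flush, then free
 *	tlb_end_vma(tlb, vma);
 */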

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation.
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e., caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
#endif

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */