/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

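/*
 * arm_heavy_mb() provides the "heavy" barrier used by mb() on SoCs that
 * need more than a DSB: it syncs the outer cache (e.g. an L2 cache
 * controller) when one is present, then invokes an optional SoC-specific
 * hook that platform code can install through the soc_mb pointer.
 */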
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
        if (outer_cache.sync)
                outer_cache.sync();
#endif
        if (soc_mb)
                soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

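/*
 * Map the page at a kernel alias whose cache colour matches the user
 * address, then clean and invalidate the D-cache over that alias.  The
 * MCRR on c14 is the ARMv6 "clean and invalidate D-cache range" block
 * operation; the trailing MCR on c7, c10, 4 is a data synchronisation
 * barrier (drain write buffer).
 */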
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
            : "cc");
}

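/*
 * As flush_pfn_alias(), but makes the I- and D-caches coherent for just
 * a sub-page range: the page is mapped at a colour-matched alias and
 * flush_icache_range() is applied to [offset, offset + len) within it.
 */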
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
        to = va + offset;
        flush_icache_range(to, to + len);
}

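/*
 * Flush the caches for a whole address space.  VIVT caches go through
 * the VIVT mm-wide helper; an aliasing VIPT D-cache is handled by
 * cleaning and invalidating the entire D-cache (MCR on c7, c14, 0)
 * followed by a barrier (c7, c10, 4).
 */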
void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

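/*
 * Flush a range of user addresses.  For aliasing VIPT there is no
 * ranged alias flush here; the whole D-cache is cleaned and invalidated
 * as in flush_cache_mm(), and executable ranges additionally invalidate
 * the whole I-cache.
 */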
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

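/*
 * Flush a single user page.  For aliasing VIPT, a colour-matched kernel
 * alias of the page is flushed instead of the user mapping itself, so
 * this works even if the user mapping is not currently resident.
 */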
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)              do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
#endif

#define FLAG_PA_IS_EXEC         1
#define FLAG_PA_CORE_IN_MM      2

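/*
 * IPI callback: invalidate the whole I-cache on other cores when cache
 * maintenance operations are not broadcast in hardware.
 */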
static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

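/*
 * Make data just written through a kernel mapping visible through the
 * corresponding user mapping.  What is needed depends on the cache
 * model: VIVT flushes the kernel range, but only if this core has the
 * target mm live; aliasing VIPT flushes a colour-matched alias; a
 * non-aliasing VIPT D-cache needs no D-side work, only I-cache
 * maintenance for executable pages.
 */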
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
                           unsigned long len, unsigned int flags)
{
        if (cache_is_vivt()) {
                if (flags & FLAG_PA_CORE_IN_MM) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (flags & FLAG_PA_IS_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

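/*
 * Wrapper used by copy_to_user_page(): derive the flags from the VMA
 * (is this core in the target mm? is the mapping executable?) and hand
 * off to __flush_ptrace_access().
 */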
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        unsigned int flags = 0;

        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                flags |= FLAG_PA_CORE_IN_MM;
        if (vma->vm_flags & VM_EXEC)
                flags |= FLAG_PA_IS_EXEC;
        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

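/*
 * Flush the uprobes execute-out-of-line slot after new instructions
 * have been copied into it.  The slot is always executable and is
 * written from a core that has the owning mm live, so both flags are
 * set unconditionally.
 */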
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len)
{
        unsigned int flags = FLAG_PA_CORE_IN_MM | FLAG_PA_IS_EXEC;

        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
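/*
 * For reference, a typical path into this function: a ptrace() write
 * such as PTRACE_POKETEXT goes through access_process_vm(), which
 * kmaps the target page and calls copy_to_user_page() to do the copy
 * and the cache maintenance together.
 */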
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}

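/*
 * Write back the kernel mapping of a (possibly compound) page.  Highmem
 * pages without a permanent mapping are mapped and flushed one subpage
 * at a time; in the kmap_high_get() case a NULL return means the page
 * has no live kernel mapping, so there are no dirty kernel cache lines
 * to write back.
 */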
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!PageHighMem(page)) {
                size_t page_size = PAGE_SIZE << compound_order(page);
                __cpuc_flush_dcache_area(page_address(page), page_size);
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
                                void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
                                void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                        kunmap_high(page + i);
                                }
                        }
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = page->index;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

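/*
 * Called when a new user PTE is installed (see set_pte_at()): write
 * back the page's D-cache lines and invalidate the I-cache so that
 * newly written instructions are visible to instruction fetch, using
 * PG_dcache_clean to skip pages that are already clean.
 */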
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct page *page;
        struct address_space *mapping;

        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (cache_is_vipt_aliasing())
                mapping = page_mapping_file(page);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
                if (test_bit(PG_dcache_clean, &page->flags))
                        clear_bit(PG_dcache_clean, &page->flags);
                return;
        }

        mapping = page_mapping_file(page);

        if (!cache_ops_need_broadcast() &&
            mapping && !page_mapcount(page))
                clear_bit(PG_dcache_clean, &page->flags);
        else {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
                struct address_space *mapping;

                mapping = page_mapping_file(page);

                if (!mapping || mapping_mapped(mapping)) {
                        void *addr;

                        addr = page_address(page);
                        /*
                         * kmap_atomic() doesn't set the page virtual
                         * address for highmem pages, and
                         * kunmap_atomic() takes care of cache
                         * flushing already.
                         */
                        if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                }
        }
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
                __flush_icache_all();
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}