/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

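/*
 * Return whether the given pfn is backed by system RAM: memblock tracks
 * every RAM range reported by the firmware/device tree, so asking it is
 * sufficient.
 */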
int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

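/*
 * Choose the page protection for a userspace mapping of physical memory
 * (e.g. via /dev/mem).  A platform may override this through the
 * ppc_md.phys_mem_access_prot hook; by default, anything that is not
 * RAM is mapped non-cacheable.
 */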
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
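/* Resolve the NUMA node for a hot-added physical address. */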
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

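/*
 * Arch hook for memory hotplug: extend the linear mapping to cover the
 * new range, flush stale cache lines, then hand the range over to the
 * generic mm code via __add_pages().
 */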
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
			      bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
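/*
 * Arch hook for memory hot-remove: release the struct pages through
 * __remove_pages(), then tear down the linear mapping for the range.
 */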
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem; instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, skip holes, and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* Mark pages that don't exist as nosave. */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

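/* Set once paging_init() has run; the zone limits are frozen after that. */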
static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0);	/* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

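/*
 * Final memory initialisation: hand boot memory over to the buddy
 * allocator, release highmem pages, and report the resulting layout.
 */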
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G.  We force memblock into bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As this is the last memblock allocation, there is no need to
	 * reset it back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

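/* Release the memory occupied by the kernel's __init text and data. */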
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

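/*
 * Make a page coherent between the d-cache and i-cache, using a
 * temporary kmap on configurations where the page may be in highmem.
 */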
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

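/* Clear a page that is about to be mapped into userspace. */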
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

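/*
 * Flush the i-cache for a range within a user page, e.g. after the
 * kernel has written instructions into it (as ptrace does when
 * inserting breakpoints).
 */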
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or the ptl lock held.
	 */
	unsigned long access, trap;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs being NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		access = 0UL;
		break;
	case 0x400:
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem, but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */