/*
 * linux/arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
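/*
 * Clear the given bits in the cached copy of the CP15 control register
 * and return the resulting value. This only updates cr_alignment; the
 * caller is responsible for writing the new value back to the hardware
 * register where required.
 */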
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

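/*
 * Parse the "initrd=<start>,<size>" kernel command line parameter.
 * Both values go through memparse(), so the usual K/M/G suffixes are
 * accepted, e.g. "initrd=0x10800000,8M" (address and size here are
 * illustrative only).
 */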
static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

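/*
 * Return the minimum, maximum-lowmem and maximum PFNs of the memory
 * known to memblock. max_low is bounded by the current memblock limit,
 * i.e. the top of lowmem; max_high covers all of DRAM, including any
 * highmem.
 */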
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

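/*
 * Derive the DMA addressing limit from the machine descriptor: if the
 * platform declares a DMA zone size, the limit is that many bytes above
 * PHYS_OFFSET, otherwise the full 32-bit address space is usable for
 * DMA. arm_dma_pfn_limit is the same boundary expressed as a PFN.
 */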
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else {
		arm_dma_limit = 0xffffffff;
	}
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

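/*
 * Compute the span and hole size of each zone from the memblock memory
 * regions and hand the result to free_area_init_node(). Lowmem lands in
 * zone 0 (split into ZONE_DMA/ZONE_NORMAL below when the machine
 * declares a DMA zone); anything above max_low goes to ZONE_HIGHMEM.
 */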
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
				    arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
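/*
 * A PFN is valid only if it round-trips through __pfn_to_phys() (which
 * rejects PFNs whose physical address would overflow phys_addr_t) and
 * refers to memory that memblock knows about and has mapped.
 */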
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

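/*
 * "Steal" a block of memory from memblock: the pages are removed from
 * the memory map entirely, so they never reach the page allocator.
 * This is only permitted before arm_memblock_init() completes, i.e.
 * from a machine's ->reserve() callback or earlier.
 */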
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

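/*
 * Locate the initrd (populated from the "initrd=" parameter, the ATAGs
 * or the FDT), sanity-check that it lies within usable and unreserved
 * memory, and reserve it in memblock. On success initrd_start and
 * initrd_end hold its virtual addresses; on failure the initrd is
 * disabled.
 */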
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

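/*
 * Set up the zone and sparsemem data for boot-time memory: find the
 * PFN limits, memtest the lowmem range, register the memory with
 * sparsemem, then size the zones. Also records min_low_pfn,
 * max_low_pfn and max_pfn for the rest of the kernel.
 */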
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

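/*
 * Release all highmem pages to the page allocator: walk each memory
 * region above max_low_pfn, carve out any memblock reservations that
 * intersect it, and free whatever remains with free_area_high().
 */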
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p - 0x%p   (%4td kB)\n"
			"      .init : 0x%p - 0x%p   (%4td kB)\n"
			"      .data : 0x%p - 0x%p   (%4td kB)\n"
			"       .bss : 0x%p - 0x%p   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Update section permissions only for the current mm (section entries
 * are copied into each mm's page tables). During startup this is the
 * init_mm. It is only safe to call this with preemption disabled, as
 * under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

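/*
 * Apply (set) or revert (clear) a table of section permissions in the
 * given mm. Regions that are not section-aligned cannot be described by
 * section descriptors and are skipped with an error.
 */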
void set_section_perms(struct section_perm *perms, int n, bool set,
		       struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, where it is executed by a single CPU while
 * all other CPUs spin and wait, so no locking is required here.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

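/*
 * Mark the non-text regions of the kernel image non-executable. Runs
 * under stop_machine() so the permission change is applied to every mm
 * without racing other CPUs.
 */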
static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

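/*
 * Poison and free the DRAM copy of the TCM code/data (the "TCM link"
 * area between __tcm_start and __tcm_end), which is no longer needed
 * once its contents have been copied into the tightly-coupled memories
 * at boot.
 */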
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

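/*
 * Called by the generic initrd code once the image has been unpacked,
 * to return the initrd pages to the page allocator. Booting with
 * "keepinitrd" on the command line skips the free, leaving the initrd
 * image in place.
 */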
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif