/*
 * linux/arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
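
/*
 * Example (hypothetical values): "initrd=0x00800000,8M" on the kernel
 * command line records a physical start of 0x00800000 and a size of
 * 8MiB here; both are validated and reserved later in
 * arm_memblock_init().
 */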

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start,
                                            unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem2() for the skipping
 * of holes in the memory map. It is populated by arm_add_memory().
 */
struct meminfo meminfo;

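/*
 * Walk each memory bank and classify its pages for a diagnostic dump.
 * Note: this appears to be a locally renamed variant of the generic
 * show_mem(); the per-bank walk is what lets it skip holes in the
 * memory map.
 */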
void show_mem2(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo *mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
                return;

        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        /* This assumes the meminfo array is properly sorted */
        *min = bank_pfn_start(&mi->bank[0]);
        for_each_bank(i, mi)
                if (mi->bank[i].highmem)
                        break;
        *max_low = bank_pfn_end(&mi->bank[i - 1]);
        *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
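
/*
 * Worked example (assumed layout): one lowmem bank starting at
 * PFN 0x60000 followed by a single highmem bank. *min is then 0x60000,
 * *max_low is the PFN just past the last lowmem bank, and *max_high is
 * the PFN just past the final (highmem) bank.
 */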

static void __init arm_bootmem_init(unsigned long start_pfn,
                                    unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page. This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                     __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}
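
/*
 * Sizing sketch (assuming 4KiB pages): bootmem keeps one bit per page,
 * so a 512MiB lowmem span covers 0x20000 PFNs and needs 0x20000 / 8 =
 * 16KiB of bitmap, for which bootmem_bootmap_pages() returns 4 pages.
 */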

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
                                       unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
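
/*
 * Example (assuming arm_dma_zone_size = SZ_64M on a 256MiB machine):
 * the lowmem zone is split so ZONE_DMA covers the first 64MiB and
 * ZONE_NORMAL the remaining 192MiB; any existing hole accounting is
 * attributed to ZONE_NORMAL.
 */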
#endif

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
                                    unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * Initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined. If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size) {
                arm_adjust_dma_zone(zone_size, zhole_size,
                                    arm_dma_zone_size >> PAGE_SHIFT);
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
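/*
 * With a hole-y memory map the generic PFN checks are not enough: a
 * PFN is valid only if memblock knows about the backing physical
 * address. For example, a PFN falling in a gap between two banks
 * fails this test even though it lies below max_pfn.
 */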
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

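/*
 * Carve a contiguous region out of memblock entirely: allocate it,
 * then free and remove it so the rest of the system never sees the
 * memory. Only legal before arm_memblock_init() completes, which
 * clears arm_memblock_steal_permitted.
 */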
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_alloc(size, align);
        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}

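/*
 * Populate memblock from the meminfo banks, then reserve everything
 * that must survive early boot: the kernel image, a validated initrd,
 * the page table and device tree reservations, and any machine
 * specific regions.
 */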
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        arm_memblock_steal_permitted = false;
        memblock_allow_resize();
        memblock_dump_all();
}

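/*
 * Early allocator bring-up: find the lowmem/highmem PFN limits, hand
 * lowmem over to bootmem, record present memory for sparsemem, then
 * size the zones via arm_bootmem_free().
 */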
void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations.
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block. If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}

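/*
 * Release the pages in [pfn, end) to the page allocator and return
 * how many were freed. "size" is reported in KiB: pages << PAGE_SHIFT
 * bytes, shifted down by 10.
 */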
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
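/*
 * Decoding note (assumed encodings): as an ARM word, 0xe7fddef0 lies
 * in the architecturally undefined space; read as two little-endian
 * Thumb halfwords it is 0xdef0 (UDF #0xf0) followed by 0xe7fd (an
 * unconditional branch back onto the UDF), so execution traps in
 * either state.
 */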
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

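/*
 * Give every highmem page that is not covered by a memblock
 * reservation to the page allocator, trimming reserved ranges out of
 * each memory region as it is walked.
 */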
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;
        int i;
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap(&meminfo);

        totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

        free_highpages();

        reserved_pages = free_pages = 0;

        for_each_bank(i, &meminfo) {
                struct membank *bank = &meminfo.bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system.
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
               nr_free_pages() << (PAGE_SHIFT - 10),
               free_pages << (PAGE_SHIFT - 10),
               reserved_pages << (PAGE_SHIFT - 10),
               totalhigh_pages << (PAGE_SHIFT - 10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
               "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
               "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
               "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
               "      .text : 0x%p - 0x%p   (%4d kB)\n"
               "      .init : 0x%p - 0x%p   (%4d kB)\n"
               "      .data : 0x%p - 0x%p   (%4d kB)\n"
               "       .bss : 0x%p - 0x%p   (%4d kB)\n",

               MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                   (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
               MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
               MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
               MLK(FIXADDR_START, FIXADDR_TOP),
               MLM(VMALLOC_START, VMALLOC_END),
               MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
               MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                   (PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
               MLM(MODULES_VADDR, MODULES_END),
#endif

               MLK_ROUNDUP(_text, _etext),
               MLK_ROUNDUP(__init_begin, __init_end),
               MLK_ROUNDUP(_sdata, _edata),
               MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
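
/*
 * Example: booting with "keepinitrd" on the command line leaves the
 * initrd pages reserved (and unpoisoned), so the image remains
 * inspectable after init has run.
 */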
#endif