/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_BLK_DEV_INITRD
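/*
 * Parse the "initrd=<start>,<size>" early parameter. <start> is the physical
 * address of the initrd image and <size> its length in bytes.
 */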
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of hibernation image, all the pages are
	 * marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
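/*
 * Look up the "linux,elfcorehdr" property in the /chosen node; it is set up
 * when the crash dump kernel is loaded and gives the location of the ELF
 * core header describing the panicked kernel's memory.
 */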
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a dump
 * capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

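	/*
	 * Start with every zone counted entirely as a hole, then subtract
	 * the pages actually provided by each memblock memory region.
	 */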
	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
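/*
 * A pfn is valid only if the corresponding memory is known to memblock and
 * is part of the linear mapping (i.e. not marked MEMBLOCK_NOMAP).
 */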
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
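/*
 * Register each memblock memory region with sparsemem, on the NUMA node it
 * belongs to.
 */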
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the amount of memory described by the FDT, as requested via the
 * "mem=" kernel command line parameter.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

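/*
 * Look up the "linux,usable-memory-range" property in the /chosen node; a
 * crash dump kernel uses it to restrict itself to the memory window set
 * aside for it by the panicked kernel.
 */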
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

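/*
 * Establish the memblock view of physical memory: apply DT and command line
 * limits, choose the physical base of the linear mapping and reserve the
 * regions that must be preserved (kernel image, initrd, crash kernel,
 * elfcorehdr, CMA).
 */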
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if doing so is needed
		 * to keep the initrd accessible via the linear mapping;
		 * otherwise this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

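/*
 * Finalise the memory layout once the memblock reservations are in place:
 * run the early memtest, initialise NUMA and sparsemem, and size the zones.
 */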
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
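	/*
	 * Only bring up the swiotlb bounce buffer if it is forced on the
	 * command line or if memory extends beyond arm64_dma_phys_limit.
	 */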
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32			> TASK_SIZE_64);
#endif

	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
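	/* Return the memory backing the __init sections to the page allocator. */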
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

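/*
 * Free the initrd pages back to the page allocator and drop the memblock
 * reservation, unless "keepinitrd" was passed on the command line.
 */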
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
		memblock_free(__virt_to_phys(start), end - start);
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);