// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

#define ARM64_ZONE_DMA_BITS	30

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory as some devices, namely the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
 * bit addressable memory area.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
static phys_addr_t arm64_dma32_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about primary kernel's core image and is used by a dump
 * capture kernel to access the system memory on primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for a zone with a given address size
 * limit. It currently assumes that for memory starting above 4G, 32-bit
 * devices will use a DMA offset.
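 *
 * For example (illustrative numbers): with DRAM starting at 0x80000000 and
 * zone_bits == 30, offset is 0x80000000 and the zone is capped at
 * min(0xc0000000, memblock_end_of_DRAM()).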
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
	return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
}

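/*
 * Populate the per-zone maximum PFNs from the DMA/DMA32 limits computed in
 * arm64_memblock_init() and hand them to the core mm via free_area_init().
 */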
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

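/*
 * Generic code uses pfn_valid() to ask whether a pfn is backed by a struct
 * page and is mapped. The first check rejects pfns whose physical address
 * would overflow phys_addr_t; memblock_is_map_memory() then also excludes
 * NOMAP regions.
 */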
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__pfn_to_section(pfn)))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

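/*
 * Look up the optional "linux,usable-memory-range" property of /chosen,
 * which restricts the memory available to the kernel (typically provided
 * to a kdump capture kernel).
 */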
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
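	/* The linear map covers half of the kernel VA space. */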
	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if its removal made the
		 * initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
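			/*
			 * memstart_offset_seed is a u16: after converting the
			 * slack to ARM64_MEMSTART_ALIGN granules,
			 * (range * seed) >> 16 scales it by seed / 65536,
			 * yielding a granule-aligned random offset.
			 */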
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		zone_dma_bits = ARM64_ZONE_DMA_BITS;
		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma32_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
}

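/*
 * Finish boot-time mm setup: NUMA initialisation, boot-time CMA
 * reservations, sparse_init() and the zone PFN limits.
 */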
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();

	/*
	 * This must be done after arm64_numa_init(), which calls numa_init()
	 * to initialize node_online_map; hugetlb_cma_reserve() uses it when
	 * spreading the required CMA size across the online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must
	 * be done after the fixed reservations.
	 */
	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
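	/*
	 * Set up the SWIOTLB bounce buffer if it was forced on the command
	 * line, or if any memory lies above the narrowest DMA zone limit.
	 */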
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

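/* Log the "mem=" limit in effect, if any; pr_emerg suits oops/panic output. */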
void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}