// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
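/* Scratch VA window where setup_vm() maps the FDT for early parsing */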
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

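/*
 * Page table allocation hooks, switched as boot progresses: the "early"
 * ops run before the MMU is on, the "fixmap" ops once the MMU is on but
 * memblock is still the only allocator, and the "late" ops once the page
 * allocator is up (see setup_vm() and setup_vm_final()).
 */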
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

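/*
 * Collect the usable RAM discovered during FDT parsing, trim anything
 * the linear mapping cannot cover, and reserve the kernel image,
 * initrd and FDT so the allocator never hands them out.
 */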
void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;
	phys_addr_t total_mem = 0;
	phys_addr_t mem_start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		end = reg->base + reg->size;
		if (!total_mem)
			mem_start = reg->base;
		if (reg->base <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(reg->size == 0);
		total_mem = total_mem + reg->size;
	}

	/*
	 * Remove the memory that lies between the end of the usable
	 * (linearly mappable) area and the end of the last region
	 */
	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
	if (mem_start + mem_size < end)
		memblock_remove(mem_start + mem_size,
				end - mem_start - mem_size);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

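/*
 * Set in setup_vm(): va_pa_offset is the distance between the linear
 * map and physical memory, pfn_base the PFN of the kernel load address.
 */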
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

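/* The largest kernel image setup_vm() is prepared to map early */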
#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

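/* Install or (when pgprot_val(prot) == 0) tear down a fixmap mapping */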
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	} else {
		pte_clear(&init_mm, addr, ptep);
		local_flush_tlb_page(addr);
	}
}

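/* Before the MMU is enabled, a physical address can be used directly */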
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

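/*
 * create_pgd_mapping() works on the level below the PGD: these macros
 * select the PMD helpers here, or the PTE helpers when the PMD level
 * is folded.
 */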
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

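/*
 * Map @va to @pa in @pgdp, descending through the intermediate levels
 * (allocated via pt_ops as needed) until the requested mapping size
 * @sz (PGDIR_SIZE, PMD_SIZE or PAGE_SIZE) is reached.
 */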
static void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup the early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-time ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE. Thus, both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that, and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	struct memblock_region *reg;

	/*
	 * The MMU is enabled at this point, but page table setup is not
	 * complete yet; the fixmap-based page table alloc functions must
	 * be used until it is.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first available built-in DTB */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
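/* Write-protect kernel text/rodata and make rodata and data non-executable */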
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

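/* Publish the memblock memory map as resources in /proc/iomem */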
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_memblock(memory, region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}

void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif