// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
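/* Fixed virtual address at which setup_vm() maps the FDT for early parsing */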
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

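/*
 * Page table allocation helpers, switched as boot progresses:
 * "early" (MMU off, static tables only), "fixmap" (MMU on, pages come
 * from memblock and are accessed through the fixmap) and "late"
 * (the normal page allocator is available).
 */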
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

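/*
 * Finalize the memblock layout: cap memory at what the linear mapping can
 * cover, reserve the kernel image, initrd and DTB, and set up max_pfn.
 */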
void __init setup_bootmem(void)
{
	phys_addr_t mem_start = 0;
	phys_addr_t start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;
		if (!mem_start)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
	}

	/*
	 * The maximal physical memory size is -PAGE_OFFSET.
	 * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is
	 * removed as it is unusable by the kernel.
	 */
	memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

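/* Upper bound on the size of the early kernel mapping created by setup_vm() */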
#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

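/*
 * Map va -> pa in the given page table, allocating and populating
 * intermediate levels through pt_ops as needed.
 */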
void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel, which allows us to
	 * reach paging_init(). We map all memory banks later in
	 * setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
	dtb_early_pa = dtb_pa;

	/*
	 * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn
	 * the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

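/*
 * Build the final kernel page table in swapper_pg_dir with the MMU on,
 * map all memory banks, then switch satp over to it.
 */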
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but page table setup is not yet
	 * complete, so the fixmap-based page table alloc functions must be
	 * used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
575
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* generic page allocation functions must be used to setup page table */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fallback to first available DTS */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
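/*
 * Write-protect kernel text and rodata, and mark rodata and data
 * non-executable.
 */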
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

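/* Register memblock memory regions with the /proc/iomem resource tree */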
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_mem_region(region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}

void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif