// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
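/*
 * With the MMU on, the flattened DT is mapped at a fixed early virtual
 * address by setup_vm() so it can be parsed before the final kernel
 * mappings exist.
 */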
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

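/*
 * Page table allocation helpers. The installed functions change as boot
 * progresses: the *_early variants run with the MMU off, the *_fixmap
 * variants run once the MMU is on but only memblock is available, and
 * the *_late variants use the normal page allocator.
 */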
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries, as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

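/*
 * Find the memory bank containing the kernel, trim memory that cannot be
 * covered by the linear mapping, and reserve the kernel image, initrd
 * and DTB in memblock.
 */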
void __init setup_bootmem(void)
{
	phys_addr_t mem_size = 0;
	phys_addr_t total_mem = 0;
	phys_addr_t mem_start, start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;
		if (!total_mem)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
		total_mem = total_mem + size;
	}

	/*
	 * Remove from memblock the memory between the end of the
	 * usable area and the end of the region.
	 */
	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
	if (mem_start + mem_size < end)
		memblock_remove(mem_start + mem_size,
				end - mem_start - mem_size);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

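/*
 * va_pa_offset is the difference between linear-map virtual addresses
 * and physical addresses, and pfn_base is the PFN of the kernel load
 * address; both are set in setup_vm() and used by the virt/phys
 * conversion helpers.
 */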
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

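/* Maximum size of the kernel mapping created early in setup_vm() */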
#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

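/*
 * Install or clear a fixmap entry by writing the PTE directly, then
 * flush the local TLB for that address.
 */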
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

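/* Before the MMU is enabled, page tables are accessed via their physical address */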
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

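/*
 * With the MMU on but the linear map not yet complete, page tables are
 * accessed through a temporary fixmap window.
 */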
static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

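/* With the MMU on but no page allocator yet, page tables come from memblock */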
static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

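/* Fill in a single PTE; only PAGE_SIZE mappings are valid at this level */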
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

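/*
 * Map va -> pa at the PMD level, either as a PMD-sized leaf or through
 * a PTE table allocated with the current pt_ops.
 */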
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

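/*
 * Indirection macros so create_pgd_mapping() works whether the level
 * below the PGD is a PMD or, with the PMD folded (RV32), a PTE table.
 */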
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

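/* Map va -> pa at the PGD level, allocating the next-level table if needed */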
void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Set up the early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-time ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

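/*
 * Called from paging_init() with the MMU on: build the final page
 * tables in swapper_pg_dir, map all memory banks, and switch satp over
 * to the swapper page table.
 */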
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
541
	u64 i;
542

	/*
	 * The MMU is enabled at this point, but the page table setup is not
	 * complete yet, so the fixmap-based page table allocation functions
	 * must be used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up the page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fallback to first available DTS */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

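/*
 * Lock down the kernel image: the text becomes read-only, and everything
 * from rodata to the end of low memory is made non-executable.
 */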
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
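/* Populate the vmemmap with base pages; huge-page vmemmap is not used */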
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif