// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
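/*
 * Fixed virtual address at which setup_vm() maps the flattened device tree
 * for early scanning when the DTB is not built into the kernel.
 */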
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

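/*
 * Page table allocation helpers. The installed ops change as boot progresses:
 * the "early" variants run from setup_vm() with the MMU off, the "fixmap"
 * variants run from setup_vm_final() when only memblock is available, and the
 * "late" variants, which use the generic page allocator, are installed at the
 * end of setup_vm_final().
 */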
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __ro_after_init;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

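/*
 * Finalize the memblock configuration: enforce the maximum supported physical
 * memory, reserve the kernel image, initrd and (if not built in) the DTB, and
 * initialize dma32_phys_limit, max_pfn and max_low_pfn.
 */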
void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t dram_end = memblock_end_of_DRAM();
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);

	/* The maximal physical memory size is -PAGE_OFFSET. */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	max_pfn = PFN_DOWN(dram_end);
	max_low_pfn = max_pfn;
	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If the DTB is built in, there is no need to reserve its memblock.
	 * Otherwise, do reserve it, but avoid using
	 * early_init_fdt_reserve_self() since __pa() does not work for
	 * DTB pointers that are fixmap addresses.
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

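/*
 * va_pa_offset is the offset between linear-map virtual addresses and the
 * physical addresses they map; pfn_base is the PFN of the kernel load
 * address. Both are initialized in setup_vm().
 */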
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

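/*
 * swapper_pg_dir is the kernel's final page table, installed in SATP by
 * setup_vm_final(); trampoline_pg_dir holds only a single PMD/PGDIR-sized
 * mapping of the kernel load address; early_pg_dir (below) covers the kernel
 * and the FDT and is used between setup_vm() and setup_vm_final().
 */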
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

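/*
 * Install a mapping of @sz (PGDIR_SIZE, PMD_SIZE or PAGE_SIZE) for @va -> @pa
 * in @pgdp, allocating any intermediate page tables through pt_ops as needed.
 */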
void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup the early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page table setup is not
	 * complete yet, so the fixmap page table alloc functions must be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
void protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* rodata section is marked readonly in mark_rodata_ro */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
}

void __init misc_mem_init(void)
{
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif