// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
EXPORT_SYMBOL(kernel_virt_addr);

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

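/*
 * Page table allocation hooks. The implementations installed behind these
 * pointers change as boot progresses: the *_early variants run with the
 * MMU off, the *_fixmap variants allocate from memblock and map the result
 * through the fixmap, and the *_late variants use the normal page
 * allocator once it is up (see setup_vm() and setup_vm_final()).
 */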
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __ro_after_init;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void __init setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
#ifdef CONFIG_64BIT
	print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
		  (unsigned long)ADDRESS_SPACE_END);
#endif
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

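/*
 * setup_bootmem() finalizes the memblock view of memory: it caps usable
 * memory at -PAGE_OFFSET, reserves the kernel image (rounded up to
 * PMD_SIZE), the initrd and the external DTB, computes the DMA32 limit
 * and picks up the reserved regions described in the device tree.
 */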
void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t dram_end = memblock_end_of_DRAM();
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);

	/* The maximal physical memory size is -PAGE_OFFSET. */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 * and make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);

	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM equals the maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	max_low_pfn = max_pfn = PFN_DOWN(dram_end);

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If DTB is built in, no need to reserve its memblock.
	 * Otherwise, do reserve it but avoid using
	 * early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops __ro_after_init;

/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_pa_offset);
#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
EXPORT_SYMBOL(va_kernel_pa_offset);
#endif
unsigned long pfn_base __ro_after_init;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

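/*
 * Install (or clear, when prot is empty) the PTE backing a fixmap slot
 * and flush the local TLB entry for that address.
 */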
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

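/*
 * Map va to pa in the given PMD table: install a leaf entry when sz is
 * PMD_SIZE, otherwise find or allocate the next-level PTE table via
 * pt_ops and descend into create_pte_mapping().
 */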
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

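/*
 * Physical load address and size of the kernel image, filled in by
 * setup_vm(). create_kernel_page_table() maps the whole image at
 * kernel_virt_addr with execute permission, using map_size granularity.
 */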
uintptr_t load_pa, load_sz;

static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
{
	uintptr_t va, end_va;

	end_va = kernel_virt_addr + load_sz;
	for (va = kernel_virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   load_pa + (va - kernel_virt_addr),
				   map_size, PAGE_KERNEL_EXEC);
}

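/*
 * setup_vm() runs with the MMU off and builds just enough page tables to
 * reach paging_init(): the trampoline mapping of the kernel, the early
 * kernel and fixmap mappings in early_pg_dir, and a temporary window for
 * the DTB. The full linear mapping is created later in setup_vm_final().
 */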
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
	load_pa = (uintptr_t)(&_start);
	load_sz = (uintptr_t)(&_end) - load_pa;

	va_pa_offset = PAGE_OFFSET - load_pa;
#ifdef CONFIG_64BIT
	va_kernel_pa_offset = kernel_virt_addr - load_pa;
#endif

	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, map_size);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	/*
	 * __va can't be used since it would return a linear mapping address
	 * whereas dtb_early_va will be used before setup_vm_final installs
	 * the linear mapping.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

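/*
 * On 64-bit the kernel text and rodata are also visible through the
 * linear mapping; make those aliases read-only and non-executable so the
 * kernel mapping remains the only executable view of the text.
 */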
#ifdef CONFIG_64BIT
void protect_kernel_linear_mapping_text_rodata(void)
{
	unsigned long text_start = (unsigned long)lm_alias(_start);
	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
	unsigned long data_start = (unsigned long)lm_alias(_data);

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
}
#endif

static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * MMU is enabled at this point, but the page table setup is not
	 * complete yet: the fixmap-based page table allocation functions
	 * must be used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size,
#ifdef CONFIG_64BIT
					   PAGE_KERNEL
#else
					   PAGE_KERNEL_EXEC
#endif
					);

		}
	}

#ifdef CONFIG_64BIT
	/* Map the kernel */
	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
void __init protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* rodata section is marked readonly in mark_rodata_ro */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif