/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

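/*
 * Pick the memory attributes for a user mapping of physical memory (e.g.
 * via /dev/mem): Device attributes for pfns without a valid struct page,
 * write-combining when the file was opened with O_SYNC, and the caller's
 * protection bits otherwise.
 */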
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

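/*
 * Allocate and zero a page table page from memblock, for use before the
 * core memory allocators are up.
 */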
static void __init *early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, PAGE_SIZE);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to use the least restrictive permissions available
		 * here; the permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

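/*
 * Create pte-level mappings for [addr, end) starting at pfn. If the pmd
 * entry is empty or a section mapping, allocate a new pte table (splitting
 * an existing section so its contents are preserved) before filling it in.
 */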
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  void *(*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = pgtable_alloc();
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

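/*
 * Fill a pmd table with block entries equivalent to the given pud block
 * mapping (same physical range and attributes); the caller installs the
 * new table.
 */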
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

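/*
 * Create pmd-level mappings for [addr, end): use section (block) mappings
 * where the virtual and physical addresses are section aligned, and fall
 * back to alloc_init_pte() otherwise. An existing pud section is split
 * into a pmd table first.
 */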
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = pgtable_alloc();
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

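/*
 * 1GB (pud-level) blocks are only used with 4KB pages and only when the
 * virtual range and physical address are all PUD_SIZE aligned.
 */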
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

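/*
 * Create pud-level mappings for [addr, end): put down 1GB blocks where
 * use_1G_block() allows it, otherwise descend into alloc_init_pmd(). A pmd
 * table made redundant by a new block mapping is freed back to memblock
 * (early boot only).
 */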
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = pgtable_alloc();
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'md'.
 */
static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot,
				    void *(*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

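/*
 * Allocate a zeroed page table page from the page allocator, for mappings
 * created once the core memory allocators are available.
 */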
static void *late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}

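/*
 * Early wrapper for mapping kernel virtual addresses in init_mm;
 * intermediate tables are allocated from memblock.
 */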
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
			 size, prot, early_pgtable_alloc);
}

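/*
 * Create a mapping in an arbitrary mm; intermediate tables come from the
 * page allocator, so this must not be called before it is available.
 */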
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
				late_pgtable_alloc);
}

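/*
 * Like create_mapping(), but for use after paging_init(), e.g. to change
 * the permissions of regions that are already mapped.
 */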
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt),
				phys, virt, size, prot, late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}

}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
	 * per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank align the start address and
			 * current memblock limit to prevent create_mapping() from
			 * allocating pte page tables from unmapped memory. With
			 * the section maps, if the first block doesn't end on section
			 * size boundary, create_mapping() will try to allocate a pte
			 * page, which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table for the
			 * current limit is already present in swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

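/*
 * With CONFIG_DEBUG_RODATA, remap with non-executable permissions any
 * memory that shares a SWAPPER_BLOCK with the kernel text but lies before
 * _stext or after __init_end.
 */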
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
							  SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);

}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

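/*
 * Wire the fixmap region into the kernel page tables using the statically
 * allocated bm_pud/bm_pmd/bm_pte tables, so __set_fixmap() works before
 * any memory allocator is available.
 */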
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

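/*
 * Install or, if the pgprot is empty, clear the pte for a single fixmap
 * slot. Clearing also flushes the TLB for that page.
 */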
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

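/*
 * Map the FDT through the FIX_FDT fixmap slot: map one SWAPPER_BLOCK_SIZE
 * chunk to validate the header and read the total size, extend the mapping
 * if the blob crosses into the next block, then reserve the FDT region in
 * memblock.
 */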
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}