tlb_nohash.c 19.3 KB
Newer Older
1 2 3 4 5 6 7 8 9
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
10 11
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
31
#include <linux/export.h>
32 33 34 35 36 37
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
Y
Yinghai Lu 已提交
38
#include <linux/memblock.h>
39
#include <linux/of_fdt.h>
B
Becky Bruce 已提交
40
#include <linux/hugetlb.h>
41 42 43

#include <asm/tlbflush.h>
#include <asm/tlb.h>
44
#include <asm/code-patching.h>
45
#include <asm/cputhreads.h>
B
Becky Bruce 已提交
46
#include <asm/hugetlb.h>
47
#include <asm/paca.h>
48 49 50

#include "mmu_decl.h"

B
Becky Bruce 已提交
51 52 53 54 55 56
/*
 * This struct lists the sw-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.   The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
57
#ifdef CONFIG_PPC_FSL_BOOK3E
B
Becky Bruce 已提交
58 59 60 61 62
/*
 * Freescale Book3E variant: direct page sizes only (.ind is unused,
 * there are no indirect/HW-tablewalk entries on these MMUs).
 * .shift is log2 of the page size in bytes, .enc the TSIZE encoding.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
89 90 91
/*
 * Generic Book3E variant: .ind, where set, gives the shift of the
 * region covered by one indirect (page-table) TLB entry of this size.
 * .shift is log2 of the page size in bytes, .enc the TSIZE encoding.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,	/* 4K pages -> 1M indirect coverage */
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,	/* 64K pages -> 256M indirect coverage */
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,	/* 16M pages -> 64G indirect coverage */
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
B
Becky Bruce 已提交
122 123
#endif /* CONFIG_PPC_FSL_BOOK3E */

124 125 126 127 128 129 130 131 132 133
/* Map a Linux MMU page-size index to its Book3E TSIZE encoding. */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
B
Becky Bruce 已提交
134
#endif /* CONFIG_PPC_BOOK3E_MMU */
135 136 137 138 139 140 141 142 143

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
/*
 * Flush all TLB entries belonging to @mm on the local CPU only.
 * Does nothing if the context has not been assigned a PID yet.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int ctx_id;

	preempt_disable();
	ctx_id = mm->context.id;
	if (ctx_id != MMU_NO_CONTEXT)
		_tlbil_pid(ctx_id);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

192 193
/*
 * Flush a single page translation on the local CPU.
 *
 * @mm:     target address space; NULL means PID 0 is used
 * @vmaddr: virtual address to invalidate
 * @tsize:  translation size encoding for the entry
 * @ind:    non-zero to target an indirect (page-table) TLB entry
 */
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

204 205 206
/* Flush one base-page-size user page on the local CPU. */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
210 211 212 213 214 215

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

/* Serializes broadcast tlbivax on parts with MMU_FTR_LOCK_BCAST_INVAL */
static DEFINE_RAW_SPINLOCK(tlbivax_lock);

/*
 * Return non-zero if @mm has only ever run on threads of the current
 * core, in which case a local flush suffices and no IPI is needed.
 */
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

224 225 226
/* Arguments marshalled to the flush IPI handlers below. */
struct tlb_flush_param {
	unsigned long addr;	/* virtual address (page flushes only) */
	unsigned int pid;	/* MMU context id to invalidate */
	unsigned int tsize;	/* translation size encoding */
	unsigned int ind;	/* non-zero for indirect entries */
};

/* IPI handler: flush the PID carried in @param, or PID 0 if none given. */
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	if (p)
		_tlbil_pid(p->pid);
	else
		_tlbil_pid(0);
}

/* IPI handler: invalidate the single page translation described by @param. */
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

/*
 * Flush all TLB entries for @mm on every CPU it has run on: remote CPUs
 * are reached by IPI unless the mm is core-local, then the local TLB is
 * flushed as well.  See the PID note above for why a racing PID change
 * is harmless.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

282 283
/*
 * Flush one page translation for @mm on all CPUs it runs on.
 *
 * If the mm is not core-local, either a broadcast tlbivax is used
 * (serialized by tlbivax_lock on parts that require it) or an IPI is
 * sent to the CPUs in mm_cpumask().  In the IPI and local cases the
 * flush always ends with a local _tlbil_va().
 *
 * @mm:     target address space; must not be NULL (user contexts only)
 * @vmaddr: virtual address to invalidate
 * @tsize:  translation size encoding
 * @ind:    non-zero to target an indirect (page-table) TLB entry
 */
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (unlikely(WARN_ON(!mm)))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			/* tlbivax reached all CPUs, no local flush needed */
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
326 327 328

/*
 * Flush one user page on all CPUs.  Huge pages additionally go through
 * flush_hugetlb_page(), which knows the real page size; the base-size
 * flush below is still issued afterwards.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

341 342 343 344 345 346 347 348 349 350 351
#ifdef CONFIG_PPC_47x
/*
 * 47x early MMU init: on SMP, disable broadcast tlbivax when the
 * device tree carries a "cooperative-partition" property at the root.
 */
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

352 353 354 355 356 357 358 359 360 361
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/*
	 * No true range flush so far: the whole kernel context (PID 0)
	 * is invalidated, on all CPUs when SMP.
	 */
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementation can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)

{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
381 382 383 384 385

/* mmu_gather teardown: flush the entire mm, like flush_tlb_range() above. */
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		/*
		 * HW tablewalk: invalidate the indirect TLB entries covering
		 * the PMD-sized region around @address, one mmu_pte_psize
		 * step at a time.
		 * NOTE(review): end is address + PMD_SIZE rather than
		 * start + PMD_SIZE — confirm this over-flush is intended.
		 */
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * while preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		/*
		 * No HW tablewalk: compute and flush the virtual linear page
		 * table address that maps this PTE page.  The top nibble of
		 * the address selects the region; 0x1 selects the VPTE space
		 * within that region.
		 */
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		/* Scale the address down to its virtual PTE location */
#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

430 431
static void setup_page_sizes(void)
{
432 433 434
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
435 436
	int i, psize;

437 438
#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
439
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
440

441
	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
442 443 444 445 446 447 448 449 450 451 452 453 454
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

455
			if (shift == 0 || shift & 1)
456 457 458 459 460 461 462 463 464
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

465
		goto out;
466
	}
467 468

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;
486 487 488 489 490 491

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;
492 493 494

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
495 496 497
			}
		}

498
		goto out;
499
	}
500 501 502 503 504 505
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

506 507 508 509 510 511 512 513 514
	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
515 516 517 518 519
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;
520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

545
out:
546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;	
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

565 566
/*
 * Select the TLB miss handlers matching the book3e_htw_mode chosen by
 * setup_page_sizes(), by patching the 0x1c0/0x1e0 exception vectors.
 */
static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	/* PPC_HTW_NONE: keep the default SW-loaded handlers */
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */

/*
 * Per-CPU MMU setup: program the MAS4 defaults according to the HW
 * tablewalk mode, and on FSL parts bolt the linear mapping into TLBCAM
 * entries (once per core).
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (cpu != boot_cpuid &&
		    (cpu != cpu_first_thread_sibling(cpu) ||
		     cpu == cpu_first_thread_sibling(boot_cpuid)))
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}
660

661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694
/*
 * One-time (boot CPU only) global MMU setup: pick linear and vmemmap
 * page sizes, probe supported sizes, select TLB miss handlers and
 * record the top of the linear mapping.
 */
static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/* Without HW tablewalk, FSL uses the bolted miss handlers */
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

/* Clamp memory to what the bolted linear mapping established above covers. */
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

728
/* boot cpu only: global setup, then this CPU's setup, then memory limits */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

736
/* Secondary CPU bringup: only the per-CPU part of the MMU setup. */
void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

741 742 743
/*
 * Compute ppc64_rma_size (the real-mode accessible region) from the
 * first memblock and cap early memblock allocations to it.
 */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * on FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size.  We still limit max to 1G even if
	 * the TLB could cover more.  This is due to what the early init
	 * code is setup to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
770 771 772 773 774 775 776
#else /* ! CONFIG_PPC64 */
/* 32-bit nohash: only the 47x variant needs early MMU setup. */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
777
#endif /* CONFIG_PPC64 */