/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant
 * processors; this does -not- include 603, however, which shares the
 * implementation with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include "mmu_decl.h"

/*
 * This struct lists the SW-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.   The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

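/*
 * Translate a SW page size index into the HW "tsize" encoding used by
 * the TLB invalidate instructions.
 */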
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */


/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

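/* A NULL mm denotes a kernel address; kernel mappings live in PID 0 */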
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

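/*
 * Return true when every user of this mm is a HW thread of the calling
 * CPU's core, in which case a core-local flush is sufficient.
 */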
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

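/* Argument block passed to the TLB flush IPI handlers below */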
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
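	/*
	 * Kernel mappings live in PID 0, so flushing that PID (on all
	 * CPUs in the SMP case) covers any kernel translation in the
	 * range.  do_flush_tlb_mm_ipi treats a NULL param as PID 0.
	 */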
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out
		 * the whole preempt & CPU mask mucking around, or even the
		 * IPI, but it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
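		/*
		 * No indirect entries: flush the stale entry covering this
		 * address in the virtual linear page table instead.
		 */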
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
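		/*
		 * vpte is now the offset of the PTE within the virtual
		 * linear page table (8 bytes per PTE, or 16 with 64K
		 * pages), aligned down to the enclosing page; tag it with
		 * the region id and flush that single entry.
		 */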
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			/* A shift of 0 means this size is unsupported;
			 * odd shifts cannot be encoded as a power of 4KB */
			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (!def->shift)
			continue;

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
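	/*
	 * EPTCFG packs up to three (sub-page size, page size) pairs of
	 * 5-bit fields, lowest pair first.
	 */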
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	/* Default WIMGE is 0b00100: only the M (coherency) bit set */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	/* Cap early memblock allocations to the linear mapping */
	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size.  We still limit max to 1G even if
	 * the TLB could cover more.  This is due to what the early init
	 * code is setup to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */