/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>

#include "mmu_decl.h"

/*
 * This struct lists the sw-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.   The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_FSL_BOOKE
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_FSL_BOOKE */

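/*
 * Translate a Linux page size index (MMU_PAGE_*) into the hardware page
 * size encoding expected by the TLB invalidate primitives.
 */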
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
129 130 131 132 133 134 135 136 137

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

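/*
 * Return true when this mm has only run on hardware threads of the current
 * core, in which case a core-local invalidate is sufficient.
 */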
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

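/* Argument block handed to the TLB flush IPIs below */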
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

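/*
 * IPI handler: flush all entries for the PID in *param on this CPU, or
 * everything under PID 0 (the kernel) when called with no parameter.
 */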
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
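	/* Kernel mappings live under PID 0 and we have no ranged invalidate
	 * here, so just flush all of PID 0, everywhere.
	 */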
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

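		/* No HW tablewalk: PTEs for this range may be cached in the
		 * TLB through the virtual linear page table.  Work out the
		 * virtual address of the VPT slice that maps this PTE page
		 * and shoot down the single entry backing it.
		 */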
#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
		(mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

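		/* TLB1 on these parts handles variable page sizes; TLBnCFG
		 * advertises the supported range in units of 4^n KB.  Flag
		 * every defined page size inside that window as usable for
		 * direct entries.
		 */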
		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto no_indirect;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
		goto no_indirect;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
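	/* Each iteration below pulls one (SPS, PS) pair out of EPTCFG,
	 * 5 bits each: PS is a page size usable for indirect (IND) entries
	 * and SPS the direct page size whose PTE pages those entries map.
	 */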
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
 no_indirect:

	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;	
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one
	 */

	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}

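/* Resolve the address of the handler 'name' and patch the exception vector
 * at offset 'exc' to branch to it.
 */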
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)

static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   one dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);

	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */

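	/* Default WIMGE for TLB-miss loaded entries: 0b00100, i.e. only the
	 * M (memory coherence required) bit set.
	 */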
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}
	mtspr(SPRN_MAS4, mas4);

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);
		memblock_analyze();

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}

void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size.  We still limit max to 1G even if
	 * the TLB could cover more.  This is due to what the early init
	 * code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */