/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
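/*
 * One byte per page of the linear mapping: 0x80 means the page is
 * currently hashed in, and the low 7 bits record the slot within the
 * PTEG (including the secondary-hash bit) so kernel_unmap_linear_page()
 * can find the entry again.
 */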
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* Default page size arrays, used when the firmware does not provide
 * any.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
			    [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = pteflags & 0x1fa;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
					 (pteflags & _PAGE_DIRTY)))
		rflags |= 1;
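	/*
	 * Illustrative result: a read-only or clean user page ends up
	 * with PP = 0x3 (user read-only), while a writable, dirty user
	 * page keeps PP = 0x2 (user read-write).
	 */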
	/*
	 * Always add "C" bit for perf. Memory coherence is always enabled
	 */
	return rflags | HPTE_R_C | HPTE_R_M;
}

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d))\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/*
		 * If we hit a bad address return error.
		 */
		if (!vsid)
			return -1;
		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/*
		 * If the kernel is relocatable (e.g. kdump), its interrupt
		 * vectors are copied down to real address 0; mark that
		 * region as executable. This matters on POWER8 systems
		 * with the relocation-on-exception feature enabled, where
		 * exceptions are raised with the MMU on (IR=DR=1), so the
		 * vector region needs to be executable for the interrupt
		 * handlers to run in virtual mode.
		 */
		if ((PHYSICAL_START > MEMORY_START) &&
			overlaps_interrupt_vector_text(vaddr, vaddr + step))
				tprot &= ~HPTE_R_N;

		hash = hpt_hash(vpn, shift, ssize);
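		/*
		 * The hash selects a PTEG (a group of HPTES_PER_GROUP == 8
		 * HPTEs); hpteg below is the index of the group's first
		 * slot in the table.
		 */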
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
					 HPTE_V_BOLTED, psize, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
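/*
 * Typical use, as in htab_initialize() below where the kernel linear
 * mapping is bolted (a sketch of an existing call site in this file):
 *
 *	BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 *			prot, mmu_linear_psize, mmu_kernel_ssize));
 */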

#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
		      int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	__be32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (be32_to_cpu(prop[0]) == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x14:
		idx = MMU_PAGE_1M;
		break;
	case 0x18:
		idx = MMU_PAGE_16M;
		break;
	case 0x22:
		idx = MMU_PAGE_16G;
		break;
	}
	return idx;
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	__be32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
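		/*
		 * Property layout, as parsed by the loop below: a list of
		 * { base_shift, slbenc, lpnum, [ shift, penc ] * lpnum }
		 * cell groups, one per supported base (segment) page size.
		 */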
		pr_info("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
		while (size > 0) {
			unsigned int base_shift = be32_to_cpu(prop[0]);
			unsigned int slbenc = be32_to_cpu(prop[1]);
			unsigned int lpnum = be32_to_cpu(prop[2]);
			struct mmu_psize_def *def;
			int idx, base_idx;

			size -= 3; prop += 3;
			base_idx = get_idx_from_shift(base_shift);
			if (base_idx < 0) {
				/*
				 * skip the pte encoding also
				 */
				prop += lpnum * 2; size -= lpnum * 2;
				continue;
			}
			def = &mmu_psize_defs[base_idx];
			if (base_idx == MMU_PAGE_16M)
				cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

			def->shift = base_shift;
			if (base_shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (base_shift - 23)) - 1;
			def->sllp = slbenc;
			/*
			 * We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			while (size > 0 && lpnum) {
				unsigned int shift = be32_to_cpu(prop[0]);
				int penc  = be32_to_cpu(prop[1]);

				prop += 2; size -= 2;
				lpnum--;

				idx = get_idx_from_shift(shift);
				if (idx < 0)
					continue;

				if (penc == -1)
					pr_err("Invalid penc for base_shift=%d "
					       "shift=%d\n", base_shift, shift);

				def->penc[idx] = penc;
				pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
					" avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
					base_shift, shift, def->sllp,
					def->avpnm, def->tlbiel, def->penc[idx]);
			}
		}
		return 1;
	}
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data) {
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	__be64 *addr_prop;
	__be32 *page_count_prop;
	unsigned int expected_pages;
	unsigned long phys_addr;
	unsigned long block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = be64_to_cpu(addr_prop[0]);
	block_size = be64_to_cpu(addr_prop[1]);
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void mmu_psize_set_default_penc(void)
{
	int bpsize, apsize;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
			mmu_psize_defs[bpsize].penc[apsize] = -1;
}

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* set the invalid penc to -1 */
	mmu_psize_set_default_penc();

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fallback on known size
	 * list for 16M capable GP & GR
	 */
	if (mmu_has_feature(MMU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	__be32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = be32_to_cpu(prop[1]);
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count, psize;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = memblock_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	psize = mmu_psize_defs[mmu_virtual_psize].shift;
	pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);
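	/*
	 * Each PTEG is 128 bytes (8 HPTEs of 16 bytes each), hence the
	 * "<< 7" below. Illustrative sizing: 1GB of RAM with 4K pages
	 * gives pteg_count = 2^30 >> 13 = 2^17, i.e. a 16MB hash table.
	 */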

	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	return htab_bolt_mapping(start, end, __pa(start),
				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				 mmu_kernel_ssize);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
			mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

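/*
 * The ppc_md.hpte_* members are function pointers; on ppc64 (ELFv1 ABI)
 * these point at a function descriptor whose first doubleword is the
 * actual text address, which is what patch_branch() needs below.
 */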
#define FUNCTION_TEXT(A)	((*(unsigned long *)(A)))

static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	patch_branch(ht64_call_hpte_insert1,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_insert2,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_remove,
		FUNCTION_TEXT(ppc_md.hpte_remove),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_updatepp,
		FUNCTION_TEXT(ppc_md.hpte_updatepp),
		BRANCH_SET_LINK);

#endif /* CONFIG_PPC_HAS_HASH_64K */

	patch_branch(htab_call_hpte_insert1,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_insert2,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_remove,
		FUNCTION_TEXT(ppc_md.hpte_remove),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_updatepp,
		FUNCTION_TEXT(ppc_md.hpte_updatepp),
		BRANCH_SET_LINK);
}

static void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0, limit;
	struct memblock_region *reg;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */ 
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0; 
#ifdef CONFIG_FA_DUMP
		/*
		 * If firmware-assisted dump is active, the firmware
		 * preserves the htab contents along with the entire
		 * partition memory, so clear the htab here to avoid
		 * ending up with stale mappings.
		 */
		if (is_fadump_active() && ppc_md.hpte_clear_all)
			ppc_md.hpte_clear_all();
#endif
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space. On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = MEMBLOCK_ALLOC_ANYWHERE;

		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = __va(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
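		/*
		 * Illustrative encoding: the HTABSIZE field of SDR1 holds
		 * log2(pteg_count) - 11, so a 16MB HPT (2^17 PTEGs) is
		 * encoded as 6, alongside the table's physical address.
		 */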

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
						    1, ppc64_rma_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for_each_memblock(memory, reg) {
		base = (unsigned long)__va(reg->base);
		size = reg->size;

		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two memblock regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							 prot,
							 mmu_linear_psize,
							 mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init early_init_mmu(void)
{
	/* Setup initial STAB address in the PACA */
	get_paca()->stab_real = __pa((u64)&initial_stab);
	get_paca()->stab_addr = (u64)&initial_stab;

	/* Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/* Initialize stab / SLB management */
	if (mmu_has_feature(MMU_FTR_SLB))
		slb_initialize();
	else
		stab_initialize(get_paca()->stab_real);
}

#ifdef CONFIG_SMP
void early_init_mmu_secondary(void)
{
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);

	/* Initialize STAB/SLB. We use a virtual address as it works
	 * in real mode on pSeries.
	 */
	if (mmu_has_feature(MMU_FTR_SLB))
		slb_initialize();
	else
		stab_initialize(get_paca()->stab_addr);
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
	u64 lpsizes;
	unsigned char *hpsizes;
	unsigned long index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		lpsizes = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xF;
	}
	hpsizes = get_paca()->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);
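	/*
	 * A 64K page holds 16 4K subpages; subpage 0's 2-bit field sits
	 * in bits 31:30 of the word, subpage 15's in bits 1:0.
	 */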

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	return 0;
}
#endif

void hash_failure_debug(unsigned long ea, unsigned long access,
			unsigned long vsid, unsigned long trap,
			int ssize, int psize, int lpsize, unsigned long pte)
{
	if (!printk_ratelimit())
		return;
	pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
		ea, access, current->comm);
	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
		trap, vsid, ssize, psize, lpsize, pte);
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	enum ctx_state prev_state = exception_enter();
	pgd_t *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	unsigned hugeshift;
	const struct cpumask *tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	/* Get region & vsid */
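	/*
	 * REGION_ID() is the top four bits of the effective address:
	 * 0x0 = user, 0xd = vmalloc/ioremap. Anything else (e.g. the
	 * bolted kernel linear mapping at 0xc) falls through to the
	 * default case below and is sent up to do_page_fault.
	 */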
 	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			rc = 1;
			goto bail;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault 
		 */
		rc = 1;
		goto bail;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Bad address. */
	if (!vsid) {
		DBG_LOW("Bad address!\n");
		rc = 1;
		goto bail;
	}
	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL) {
		rc = 1;
		goto bail;
	}

	/* Check CPU locality */
	tmp = cpumask_of(smp_processor_id());
	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
		local = 1;

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we might
	 * be hitting a special driver mapping, and need to align the
	 * address before we fetch the PTE.
	 *
	 * It could also be a hugepage mapping, in which case this is
	 * not necessary, but it's not harmful, either.
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		rc = 1;
		goto bail;
	}

	/* Add _PAGE_PRESENT to the required access perm */
	access |= _PAGE_PRESENT;

	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		rc = 1;
		goto bail;
	}

	if (hugeshift) {
		if (pmd_trans_huge(*(pmd_t *)ptep))
			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
					     trap, local, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
		else
			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
					      local, ssize, hugeshift, psize);
#else
		else {
			/*
			 * if we have hugeshift, and is not transhuge with
			 * hugetlb disabled, something is really wrong.
			 */
			rc = 1;
			WARN_ON(1);
		}
#endif
		goto bail;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(mm, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

	/* Dump some info in case of hash insertion failure; such failures
	 * should never happen, so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize, psize,
				   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);

bail:
	exception_exit(prev_state);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	int hugepage_shift;
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	unsigned long flags;
	int rc, ssize, local = 0;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);
	if (!vsid)
		return;
	/*
	 * Hash doesn't like irqs. Walking linux page table with irq disabled
	 * saves us from holding multiple locks.
	 */
	local_irq_save(flags);

	/*
	 * THP pages use update_mmu_cache_pmd. We don't do
	 * hash preload there. Hence can ignore THP here
	 */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
	if (!ptep)
		goto out_exit;

	WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Is that local to this CPU ? */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
				    subpage_protection(mm, ea));

	/* Dump some info in case of hash insertion failure; such failures
	 * should never happen, so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm->context.user_psize,
				   mm->context.user_psize,
				   pte_val(*ptep));
out_exit:
	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
		/*
		 * We use same base page size and actual psize, because we don't
		 * use these functions for hugepage
		 */
		ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
	} pte_iterate_hashed_end();

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactions are not aborted by tlbiel, only tlbie.
	 * Without, syncing a page back to a block device w/ PIO could pick up
	 * transactional data (bad!) so we force an abort here.  Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) &&
	    current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_enable();
		tm_abort(TM_CAUSE_TLBI);
	}
#endif
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vpn[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);

	exception_exit(prev_state);
}

long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
			   unsigned long pa, unsigned long rflags,
			   unsigned long vflags, int psize, int ssize)
{
	unsigned long hpte_group;
	long slot;

repeat:
	hpte_group = ((hash & htab_hash_mask) *
		       HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
				  psize, psize, ssize);

	/* Primary is full, try the secondary */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
					  vflags | HPTE_V_SECONDARY,
					  psize, psize, ssize);
		if (slot == -1) {
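			/*
			 * Both the primary and secondary groups are full:
			 * use the timebase low bit as a cheap coin flip to
			 * pick one of them, evict an entry and try again.
			 */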
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP)&~0x7UL;

			ppc_md.hpte_remove(hpte_group);
			goto repeat;
		}
	}

	return slot;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON (ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
			       mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* On LPAR systems, the first entry is our RMA region,
	 * non-LPAR 64-bit hash MMU systems don't have a limitation
	 * on real mode access, but using the first entry works well
	 * enough. We also clamp it to 1G to avoid some funky things
	 * such as RTAS bugs etc...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(ppc64_rma_size);
}