/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map  (0xc000000000000000) */
	VMALLOC_INDEX	= 1, /* Kernel virtual map (0xd000000000000000) */
	KSTACK_INDEX	= 2, /* Kernel stack map */
};

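/* slb_allocate() is implemented in assembly, in slb_low.S. */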
extern void slb_allocate(unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	 /* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}
}

/*
 * Insert the bolted entries into an empty SLB.
 * This is not the same as rebolt because the bolted segments are not
 * changed, just loaded from the shadow area.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

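/*
 * Flush the SLB and re-bolt the VMALLOC and kernel stack entries. Callers
 * must have interrupts hard-disabled.
 */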
static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
		        "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");
}

void slb_flush_and_rebolt(void)
{

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

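/*
 * Save the current SLB contents into the buffer passed by the caller, so
 * they can be dumped later (e.g. from the machine check handler when an
 * SLB error is reported).
 */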
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

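/*
 * Refresh the bolted VMALLOC entry, e.g. after the vmalloc/io page size has
 * changed, then flush and re-bolt the SLB.
 */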
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	copy_mm_to_paca(mm);

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32-bit and 64-bit PowerPC executables are linked at
	 * 0x10000000, so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually addi) and cmpldi take a 16-bit immediate
	 * value, and that it is situated in the same location in the
	 * instruction, i.e. bits 16-31 (big-endian bit order) or the lower
	 * 16 bits. The signedness of the immediate operand differs between
	 * the two instructions, but this code only ever patches a small
	 * value, much less than 1 << 15, so we can get away with it.
	 * To patch the value we read the existing instruction, clear the
	 * immediate value, and or in our new value, then write the
	 * instruction back.
	 */
	unsigned int insn = (*insn_addr & 0xffff0000) | immed;

	patch_instruction(insn_addr, insn);
}

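/* Patch points in the assembly SLB miss handler (defined in slb_low.S). */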
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

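/*
 * Insert a user SLB entry at the next round-robin slot and record it in the
 * PACA slb_cache so that switch_slb() can invalidate it cheaply later.
 */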
static void insert_slb_entry(unsigned long vsid, unsigned long ea,
			     int bpsize, int ssize)
{
	unsigned long flags, vsid_data, esid_data;
	enum slb_index index;
	int slb_cache_index;

	/*
	 * We are called with irqs disabled, hence it should be safe to
	 * access the PACA.
	 */
	VM_WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	index = get_paca()->stab_rr;

	/*
	 * Simple round-robin replacement of the SLB, starting at SLB_NUM_BOLTED.
	 */
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;

	get_paca()->stab_rr = index;

	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
		    ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content no longer
		 * accurately reflects the active SLB contents. Bump the
		 * pointer so that switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
{
	struct mm_struct *mm = current->mm;
	unsigned long vsid;
	int bpsize;

	/*
	 * We are always above 1TB, hence use the high user segment size.
	 */
	vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
	bpsize = get_slice_psize(mm, ea);
	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
}

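/*
 * C handler for SLB misses on user addresses above what the assembly fast
 * path handles (i.e. beyond MAX_EA_BITS_PER_CONTEXT).
 */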
void slb_miss_large_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long ea = regs->dar;
	int context;

	if (REGION_ID(ea) != USER_REGION_ID)
		goto slb_bad_addr;

	/*
	 * Are we beyond what the page table layout supports?
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		goto slb_bad_addr;

	/* Lower addresses should have been handled by asm code */
	if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
		goto slb_bad_addr;

	/*
	 * Consider this a bad access if we take an SLB miss on an address
	 * above the addr limit.
	 */
	if (ea >= current->mm->context.slb_addr_limit)
		goto slb_bad_addr;

	context = get_ea_context(&current->mm->context, ea);
	if (!context)
		goto slb_bad_addr;

	handle_multi_context_slb_miss(context, ea);
	exception_exit(prev_state);
	return;

slb_bad_addr:
	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
	else
		bad_page_fault(regs, ea, SIGSEGV);
	exception_exit(prev_state);
}