/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map  (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

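/* Mask selecting the ESID bits of an effective address for a 256M or 1T segment. */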
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

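/*
 * Build the ESID (high) word of an SLB entry: the effective address masked
 * to its segment boundary, plus the valid bit and the SLB slot index.
 */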
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

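/*
 * Build the VSID (low) word of an SLB entry from a raw VSID, segment size
 * and protection/LLP flags; mk_vsid_data() below does the same for a kernel
 * address by looking up its VSID first.
 */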
static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					 unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}

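/*
 * Keep the SLB shadow buffer in sync with a bolted entry. The shadow is what
 * the realmode restore code below (and, on pseries, the hypervisor) uses to
 * recreate bolted entries, so it must never contain a valid but stale ESID.
 */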
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	 /* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}
}

/*
 * Insert the bolted entries into an empty SLB.
 * This is not the same as rebolt because the bolted segments are not
 * changed, just loaded from the shadow area.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

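/*
 * Flush the SLB and rebolt the kernel stack entry from the shadow buffer.
 * slbia does not invalidate entry 0, so the bolted linear mapping survives.
 */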
void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, lflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - kernel stack */
		     "slbmte	%0,%1\n"
		     "isync"
		     :: "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");

	get_paca()->slb_cache_ptr = 0;
}

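/*
 * Capture the hardware SLB into slb_ptr (read back with slbmfee/slbmfev) and
 * remember slb_cache_ptr, so the state can be dumped later by
 * slb_dump_contents().
 */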
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

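/*
 * Print an SLB image captured by slb_save_contents(), decoding valid entries
 * by segment size, followed by the saved SLB cache.
 */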
void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

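/*
 * The segment properties (sllp) used for the vmalloc region may change;
 * flush and rebolt so stale entries get re-faulted with the new settings.
 */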
void slb_vmalloc_update(void)
{
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile("isync ; " PPC_SLBIA(3)" ; isync");
	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			unsigned long slbie_data = 0;
			int i;

			asm volatile("isync" : : : "memory");
			for (i = 0; i < offset; i++) {
				/* EA */
				slbie_data = (unsigned long)
					get_paca()->slb_cache[i] << SID_SHIFT;
				slbie_data |= user_segment_size(slbie_data)
						<< SLBIE_SSIZE_SHIFT;
				slbie_data |= SLBIE_C; /* user slbs have C=1 */
				asm volatile("slbie %0" : : "r" (slbie_data));
			}

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				asm volatile("slbie %0" : : "r" (slbie_data));

			asm volatile("isync" : : : "memory");
		} else {
			struct slb_shadow *p = get_slb_shadow();
			unsigned long ksp_esid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
			unsigned long ksp_vsid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

			asm volatile("isync\n"
				     PPC_SLBIA(1) "\n"
				     "slbmte	%0,%1\n"
				     "isync"
				     :: "r"(ksp_vsid_data),
					"r"(ksp_esid_data));
		}

		get_paca()->slb_cache_ptr = 0;
	}

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate_user(mm, pc);

	if (!esids_match(pc, stack))
		slb_allocate_user(mm, stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate_user(mm, exec_base);
}

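/* Record the number of SLB entries the kernel should use. */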
void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}

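/*
 * core_flush_all_slbs() flushes and rebolts the SLB on every CPU currently
 * running 'mm'; cpu_flush_slb() is the per-CPU callback it runs.
 */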
static void cpu_flush_slb(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

void core_flush_all_slbs(struct mm_struct *mm)
{
	on_each_cpu(cpu_flush_slb, mm, 1);
}

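/*
 * Set up the SLB at boot and on secondary CPU bringup: record the segment
 * encodings, wipe the SLB and ERATs, and create the bolted linear-map entry
 * (plus, on secondaries, the kernel stack entry).
 */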
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

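/*
 * Remember the ESID of a newly inserted user entry in the per-CPU slb_cache
 * so switch_slb() can invalidate user entries with targeted slbie instead of
 * a full flush; on overflow, mark the cache invalid.
 */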
static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static enum slb_index alloc_slb_index(void)
{
	enum slb_index index;

	/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
	index = get_paca()->stab_rr;
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;
	get_paca()->stab_rr = index;

	return index;
}

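/*
 * Compute the VSID for 'ea' in 'context', pick a victim slot and write the
 * new entry with slbmte; user entries are also recorded in the slb cache.
 */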
static long slb_insert_entry(unsigned long ea, unsigned long context,
				unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	index = alloc_slb_index();

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}

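/*
 * Handle an SLB miss on a kernel address: choose segment flags based on the
 * region (linear map, vmemmap, vmalloc or ioremap) and insert the entry.
 */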
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
		return -EFAULT;

	if (id == KERNEL_REGION_ID) {
		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {
		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {
		if (ea < H_VMALLOC_END)
			flags = get_paca()->vmalloc_sllp;
		else
			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = id - KERNEL_REGION_CONTEXT_OFFSET;

	return slb_insert_entry(ea, context, flags, ssize, true);
}

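/*
 * Handle an SLB miss on a user address: look up the address's context and
 * slice base page size, then insert a user entry.
 */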
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * consider this a bad access if we take an SLB miss
	 * on an address above the addr limit.
	 */
	if (ea >= mm->context.slb_addr_limit)
		return -EFAULT;

	context = get_ea_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}

long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
	unsigned long id = REGION_ID(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (unlikely(!(regs->msr & MSR_RI)))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything
	 * that is not bolted. E.g., PACA and global variables are okay,
	 * mm->context stuff is not.
	 *
	 * SLB user faults can access all of kernel memory, but must be
	 * careful not to touch things like IRQ state because it is not
	 * "reconciled" here. The difficulty is that we must use
	 * fast_exception_return to return from kernel SLB faults without
	 * looking at possible non-bolted memory. We could test user vs
	 * kernel faults in the interrupt handler asm and do a full fault,
	 * reconcile, ret_from_except for user faults which would make them
	 * first class kernel code. But for performance it's probably nicer
	 * if they go via fast_exception_return too.
	 */
	if (id >= KERNEL_REGION_ID) {
		return slb_allocate_kernel(ea, id);
	} else {
		struct mm_struct *mm = current->mm;

		if (unlikely(!mm))
			return -EFAULT;

		return slb_allocate_user(mm, ea);
	}
}

void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
		else
			bad_page_fault(regs, ea, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}