/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
	/*
	 * First stage of the 0xc00 system call path.  Entry: r0 = syscall
	 * number.  On CPUs with CPU_FTR_REAL_LE, syscall number 0x1ebe
	 * requests the fast endian switch handled at label 1:
	 * (SYSCALL_PSERIES_3).  Otherwise we stash the user r13 in r9,
	 * load the PACA into r13 and pick up SRR0 (return address) in r11.
	 */
#define SYSCALL_PSERIES_1 					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ; /* fast endian-switch magic? */	\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;	/* save user r13 in r9 */	\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;	/* r11 = SRR0 (return addr) */	\
0:

/*
 * Second stage of the syscall path (reloc-off variant): switch to the
 * kernel MSR and enter system_call_common via rfid.  On entry r13 = PACA,
 * r9 = saved user r13, r11 = SRR0 (set up by SYSCALL_PSERIES_1).
 */
#define SYSCALL_PSERIES_2_RFID 					\
	mfspr	r12,SPRN_SRR1 ;					\
	LOAD_HANDLER(r10, system_call_common) ; 		\
	mtspr	SPRN_SRR0,r10 ; 				\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ; 				\
	rfid ; 							\
	b	. ;	/* prevent speculative execution */

#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;	/* 1: reached from SYSCALL_PSERIES_1 */	\
	xori	r12,r12,MSR_LE ;	/* flip endian bit */	\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly so we do it via the CTR which
	 * is volatile across system calls.
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	LOAD_HANDLER(r12, system_call_common) ;			\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_common ;
#endif

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100	/* fixed base: first vector (system reset) */
	.globl __start_interrupts
__start_interrupts:

EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap/sleep/winkle.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31	/* extract SRR1 wakeup-reason bits */
	beq	9f			/* not a power-saving wakeup */

	cmpwi	cr3,r13,2		/* cr3 used below to pick loss path */
	GET_PACA(r13)
	bl	pnv_restore_hyp_resource

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	mfspr	r3,SPRN_SRR1
	blt	cr3,2f
	b	pnv_wakeup_loss
2:	b	pnv_wakeup_noloss

9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)

EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	SET_SCRATCH0(r13)		/* save r13 */
	/*
	 * Running native on arch 2.06 or later, we may wakeup from winkle
	 * inside machine check. If yes, then last bit of HSPGR0 would be set
	 * to 1. Hence clear it unconditionally.
	 */
	GET_PACA(r13)
	clrrdi	r13,r13,1
	SET_PACA(r13)
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_powernv_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x300)

EXC_REAL(data_access, 0x300, 0x380)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR		/* faulting data address */
	mfspr	r12,SPRN_SRR1
	crset	4*cr6+eq		/* cr6.eq set: data (not instr) miss */
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_REAL_END(data_access_slb, 0x380, 0x400)

EXC_REAL(instruction_access, 0x400, 0x480)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
	crclr	4*cr6+eq		/* cr6.eq clear: instruction miss */
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_REAL_END(instruction_access_slb, 0x480, 0x500)

203 204 205
	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
206
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600)
207 208
	.globl hardware_interrupt_hv;
hardware_interrupt_hv:
209
	BEGIN_FTR_SECTION
210
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
211
					    EXC_HV, SOFTEN_TEST_HV)
212
do_kvm_H0x500:
213
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
214
	FTR_SECTION_ELSE
215
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
216
					    EXC_STD, SOFTEN_TEST_PR)
217
do_kvm_0x500:
218
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
219
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
220 221 222 223 224 225 226
EXC_REAL_END(hardware_interrupt, 0x500, 0x600)

EXC_REAL(alignment, 0x600, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x600)

EXC_REAL(program_check, 0x700, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x700)

EXC_REAL(fp_unavailable, 0x800, 0x900)
TRAMP_KVM(PACA_EXGEN, 0x800)

EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)

EXC_REAL_HV(hdecrementer, 0x980, 0xa00)

EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xa00)

EXC_REAL(trap_0b, 0xb00, 0xc00)
TRAMP_KVM(PACA_EXGEN, 0xb00)

EXC_REAL_BEGIN(system_call, 0xc00, 0xd00)
	 /*
	  * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
	  * that support it) before changing to HMT_MEDIUM. That allows the KVM
	  * code to save that value into the guest state (it is the guest's PPR
	  * value). Otherwise just change to HMT_MEDIUM as userspace has
	  * already saved the PPR.
	  */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
	HMT_MEDIUM;
	std	r10,PACA_EXGEN+EX_R10(r13)
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
	mfcr	r9
	KVMTEST_PR(0xc00)
	GET_SCRATCH0(r13)
#else
	HMT_MEDIUM;
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
EXC_REAL_END(system_call, 0xc00, 0xd00)

TRAMP_KVM(PACA_EXGEN, 0xc00)

EXC_REAL(single_step, 0xd00, 0xe00)
TRAMP_KVM(PACA_EXGEN, 0xd00)

279 280 281 282

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
283
__EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0xe20)
284

285
__EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0xe40)
286

287
__EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0xe60)
288

289
__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0xe80, hmi_exception_early)
290

291
__EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0xea0)
292

293
__EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0xec0)
294

295
EXC_REAL_NONE(0xec0, 0xf00)
296

297
__EXC_REAL_OOL(performance_monitor, 0xf00, 0xf20)
298

299
__EXC_REAL_OOL(altivec_unavailable, 0xf20, 0xf40)
300

301 302 303 304 305 306 307
__EXC_REAL_OOL(vsx_unavailable, 0xf40, 0xf60)

__EXC_REAL_OOL(facility_unavailable, 0xf60, 0xf80)

__EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0)

EXC_REAL_NONE(0xfa0, 0x1200)
308

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x1300)
#endif

EXC_REAL(instruction_breakpoint, 0x1300, 0x1400)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)

EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600)
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST_PR(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x1600)

TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x1700)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x1700)
#endif

EXC_REAL(altivec_assist, 0x1700, 0x1800)
TRAMP_KVM(PACA_EXGEN, 0x1700)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x1900)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
#else /* CONFIG_CBE_RAS */
/*
 * NOTE(review): a stray ". = 0x1800" followed EXC_REAL_NONE here in the
 * scraped copy; EXC_REAL_NONE already accounts for the 0x1800-0x1900 space,
 * so the bare origin directive was viewer residue and has been dropped.
 */
EXC_REAL_NONE(0x1800, 0x1900)
#endif


/*** Out of line interrupts support ***/

	/* moved from 0x200 */
TRAMP_REAL_BEGIN(machine_check_powernv_early)
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE upto level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,4
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.	/* prevent speculative execution */
2:
	/* Stack overflow. Stay on emergency stack and panic.
	 * Keep the ME bit off while panic-ing, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

TRAMP_REAL_BEGIN(machine_check_pSeries)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
	/*
	 * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
	 * difference that MSR_RI is not enabled, because PACA_EXMC is being
	 * used, so nested machine check corrupts it. machine_check_common
	 * enables MSR_RI.
	 */
	ld	r10,PACAKMSR(r13)
	xori	r10,r10,MSR_RI
	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r12, machine_check_common)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
TRAMP_KVM(PACA_EXGEN, 0x400)
TRAMP_KVM(PACA_EXSLB, 0x480)
TRAMP_KVM(PACA_EXGEN, 0x900)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

537
	/* moved from 0xe00 */
538 539 540 541 542 543 544 545
__TRAMP_REAL_REAL_OOL_HV(h_data_storage, 0xe00)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)

__TRAMP_REAL_REAL_OOL_HV(h_instr_storage, 0xe20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe20)

__TRAMP_REAL_REAL_OOL_HV(emulation_assist, 0xe40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
546

547 548
__TRAMP_REAL_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
549

550 551 552 553 554
__TRAMP_REAL_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)

__TRAMP_REAL_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
555

556
	/* moved from 0xf00 */
557 558 559 560 561 562 563 564 565 566 567 568 569 570
__TRAMP_REAL_REAL_OOL(performance_monitor, 0xf00)
TRAMP_KVM(PACA_EXGEN, 0xf00)

__TRAMP_REAL_REAL_OOL(altivec_unavailable, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)

__TRAMP_REAL_REAL_OOL(vsx_unavailable, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)

__TRAMP_REAL_REAL_OOL(facility_unavailable, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)

__TRAMP_REAL_REAL_OOL_HV(h_facility_unavailable, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
571 572

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	cmpwi	r10,PACA_IRQ_HMI;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
	cmpwi	r3,0xea0
	beq	h_virt_irq_common
	cmpwi	r3,0xe60
	beq	hmi_exception_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4		/* step SRR0 past the skipped insn */
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4		/* step HSRR0 past the skipped insn */
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */

/*** Common interrupt handlers ***/

EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)

#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
#else
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
#endif
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
EXC_COMMON(single_step_common, 0xd00, single_step_exception)
EXC_COMMON(trap_0e_common, 0xe00, unknown_exception)
EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
EXC_COMMON_HV(denorm_common, 0x1500, unknown_exception)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif

725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest ; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
740 741 742 743 744 745
EXC_VIRT_NONE(0x4100, 0x4200)
EXC_VIRT_NONE(0x4200, 0x4300)

EXC_VIRT(data_access, 0x4300, 0x4380, 0x300)

EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400)
746
	SET_SCRATCH0(r13)
747
	EXCEPTION_PROLOG_0(PACA_EXSLB)
748 749 750 751
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
752
	crset	4*cr6+eq
753
#ifndef CONFIG_RELOCATABLE
754
	b	slb_miss_realmode
755 756
#else
	/*
757
	 * We can't just use a direct branch to slb_miss_realmode
758 759 760 761
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
762
	LOAD_HANDLER(r10, slb_miss_realmode)
763 764 765
	mtctr	r10
	bctr
#endif
766
EXC_VIRT_END(data_access_slb, 0x4380, 0x4400)
EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400)

EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
	crclr	4*cr6+eq		/* cr6.eq clear: instruction miss */
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x4500)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x4600)
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x4600)

EXC_VIRT(alignment, 0x4600, 0x4700, 0x600)
EXC_VIRT(program_check, 0x4700, 0x4800, 0x700)
EXC_VIRT(fp_unavailable, 0x4800, 0x4900, 0x800)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x4b00, 0xa00)
EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)

EXC_VIRT_BEGIN(system_call, 0x4c00, 0x4d00)
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3
EXC_VIRT_END(system_call, 0x4c00, 0x4d00)
EXC_VIRT(single_step, 0x4d00, 0x4e00, 0xd00)

EXC_VIRT_BEGIN(unused, 0x4e00, 0x4e20)
	b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
EXC_VIRT_END(unused, 0x4e00, 0x4e20)

EXC_VIRT_BEGIN(unused, 0x4e20, 0x4e40)
	b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
EXC_VIRT_END(unused, 0x4e20, 0x4e40)

__EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x4e60)

EXC_VIRT_BEGIN(unused, 0x4e60, 0x4e80)
	b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
EXC_VIRT_END(unused, 0x4e60, 0x4e80)

__EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x4ea0)

__EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x4ec0)

EXC_VIRT_NONE(0x4ec0, 0x4f00)

__EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x4f20)

__EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x4f40)

__EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x4f60)

__EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x4f80)

__EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x4fa0)

EXC_VIRT_NONE(0x4fa0, 0x5200)

EXC_VIRT_NONE(0x5200, 0x5300)

EXC_VIRT(instruction_breakpoint, 0x5300, 0x5400, 0x1300)

#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x5600)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x5600)
#else
EXC_VIRT_NONE(0x5500, 0x5600)
#endif

EXC_VIRT_NONE(0x5600, 0x5700)

EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700)

EXC_VIRT_NONE(0x5800, 0x5900)

TRAMP_REAL_BEGIN(ppc64_runlatch_on_trampoline)
	b	__ppc64_runlatch_on
867

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
EXC_COMMON_BEGIN(data_access_common)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
891

892
EXC_COMMON_BEGIN(h_data_storage_common)
893 894 895 896 897
	mfspr   r10,SPRN_HDAR
	std     r10,PACA_EXGEN+EX_DAR(r13)
	mfspr   r10,SPRN_HDSISR
	stw     r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
898
	bl      save_nvgprs
899
	RECONCILE_IRQ_STATE(r10, r11)
900
	addi    r3,r1,STACK_FRAME_OVERHEAD
901 902
	bl      unknown_exception
	b       ret_from_except
903

904
EXC_COMMON_BEGIN(instruction_access_common)
905
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
906
	RECONCILE_IRQ_STATE(r10, r11)
907
	ld	r12,_MSR(r1)
908 909 910
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
911 912 913
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
914
	b	do_hash_page		/* Try to handle as hpte fault */
915 916
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
917
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
918

919
EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
920

921 922 923 924
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
925
EXC_COMMON_BEGIN(machine_check_common)
926
	mfspr	r10,SPRN_DAR
927
	std	r10,PACA_EXMC+EX_DAR(r13)
928
	mfspr	r10,SPRN_DSISR
929
	stw	r10,PACA_EXMC+EX_DSISR(r13)
930 931
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
932
	RECONCILE_IRQ_STATE(r10, r11)
933 934
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
935 936 937
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd 	r10,1
938 939
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
940
	bl	save_nvgprs
941
	addi	r3,r1,STACK_FRAME_OVERHEAD
942 943
	bl	machine_check_exception
	b	ret_from_except
944

EXC_COMMON_BEGIN(alignment_common)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except
960

EXC_COMMON_BEGIN(program_check_common)
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except
968

EXC_COMMON_BEGIN(fp_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel-mode fault: report it */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except
1030

1031
/*
 * VSX unavailable (0xf40) common handler.
 * Mirrors altivec_unavailable_common; note the tail branch to
 * load_up_vsx (not bl) -- load_up_vsx performs its own return path.
 */
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* from kernel: report, don't load */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
__TRAMP_REAL_VIRT_OOL_HV(emulation_assist, 0xe40)
__TRAMP_REAL_VIRT_OOL_MASKABLE_HV(h_doorbell, 0xe80)
__TRAMP_REAL_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0xea0)
__TRAMP_REAL_VIRT_OOL(performance_monitor, 0xf00)
__TRAMP_REAL_VIRT_OOL(altivec_unavailable, 0xf20)
__TRAMP_REAL_VIRT_OOL(vsx_unavailable, 0xf40)
__TRAMP_REAL_VIRT_OOL(facility_unavailable, 0xf60)
__TRAMP_REAL_VIRT_OOL_HV(h_facility_unavailable, 0xf80)

1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083
	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Facility unavailable (0xf60) and hypervisor facility unavailable (0xf80) */
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)

#ifdef CONFIG_CBE_RAS
/* Cell Broadband Engine RAS interrupt common handlers */
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */


/*
 * Hypervisor Maintenance Interrupt early (real mode) handler.
 * Builds a frame on the emergency stack, calls hmi_exception_realmode(),
 * then winds the frame back and continues at the virtual-mode trampoline
 * to pull the HMI event information from firmware.
 */
EXC_COMMON_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	std	r11,_NIP(r1)		/* save HSRR0 in stackframe	*/
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	std	r12,_MSR(r1)		/* save HSRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tramp_real_hmi_exception


/*
 * Unwind a machine-check stack frame: move the saved NIP/MSR back into
 * SRR0/SRR1, restore CTR/XER/LR/CR and the GPRs, decrement
 * paca->in_mce, and switch back to the original r1.  MSR_RI is cleared
 * first because SRR0/SRR1 hold live return state from here on.
 */
#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)

	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
EXC_COMMON_BEGIN(machine_check_handle_early)
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
	blt	3f
	/* Supervisor/Hypervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
3:	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
	/*
	 * Check what idle state this CPU was in and go back to same mode
	 * again.
	 */
	lbz	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	r3,PNV_THREAD_NAP
	bgt	10f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
10:
	cmpwi	r3,PNV_THREAD_SLEEP
	bgt	2f
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
	/* No return */

2:
	/*
	 * Go back to winkle. Please note that this thread was woken up in
	 * machine check from winkle and have not restored the per-subcore
	 * state. Hence before going back to winkle, set last bit of HSPGR0
	 * to 1. This will make sure that if this thread gets woken up
	 * again at reset vector 0x100 then it will get chance to restore
	 * the subcore state.
	 */
	ori	r13,r13,1
	SET_PACA(r13)
	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
	/* No return */
4:
#endif
	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. if yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

EXC_COMMON_BEGIN(unrecover_mce)
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * cr6.eq is set for a D-SLB miss, clear for a I-SLB miss
 * We assume we aren't going to take any exceptions during this procedure.
 */
EXC_COMMON_BEGIN(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
	std	r3,PACA_EXSLB+EX_DAR(r13)

	/* eq set here means "bad address" below, unless the allocation
	 * path clears it (radix never calls slb_allocate_realmode). */
	crset	4*cr0+eq
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	bl	slb_allocate_realmode
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	mtlr	r10

	beq	8f		/* if bad address, make full stack frame */

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

	/* All done -- return from exception. */

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x02,r9		/* I/D indication is in cr6 */
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

8:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,bad_addr_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

/* Unrecoverable SLB miss (MSR_RI was clear): build a frame and die. */
EXC_COMMON_BEGIN(unrecov_slb)
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b


/*
 * SLB miss on a bad effective address: build a full frame and report.
 * cr6.eq distinguishes D-side (trap stays 0x380) from I-side, where the
 * trap number is rewritten to 0x480.
 */
EXC_COMMON_BEGIN(bad_addr_slb)
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3, PACA_EXSLB+EX_DAR(r13)
	std	r3, _DAR(r1)
	beq	cr6, 2f
	li	r10, 0x480		/* fix trap number for I-SLB miss */
	std	r10, _TRAP(r1)
2:	bl	save_nvgprs
	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	slb_miss_bad_addr
	b	ret_from_except

#ifdef CONFIG_PPC_970_NAP
/* PPC970 nap wakeup fixup: clear the napping flag in the thread_info
 * local flags (r9/r10/r11 presumably set up by the nap-wakeup test in
 * the caller -- confirm) and rewrite the saved NIP so the interrupted
 * idle task resumes as if it had executed blr.
 */
TRAMP_REAL_BEGIN(power4_fixup_nap)
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

/*
 * Hash table stuff
 */
	.align	7
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 msr
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f
#endif /* CONFIG_PPC_STD_MMU_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f			/* handled: lite return (label below) */
	bl	save_nvgprs
	mr	r5,r3			/* r5 = do_page_fault() error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)		/* NOTE(review): lwz loads only the low
					 * 32 bits of DAR (ld is used above) --
					 * confirm the truncation is intended */
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
12:	b	ret_from_except_lite	/* shared "handled" exit, see 11: above */

#ifdef CONFIG_PPC_STD_MMU_64
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3			/* r5 = __hash_page() error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
#endif

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3			/* r4 = faulting address */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except


/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)	/* switch to the emergency stack */
	subi	r1,r1,64+INT_FRAME_SIZE	/* alloc frame plus 64-byte gap */
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)		/* record the bad r1 */
	std	r11,_NIP(r1)		/* saved SRR0 */
	std	r12,_MSR(r1)		/* saved SRR1 */
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	/* r3 points to an EX_* paca save area holding the original
	 * r3 and r9-r13 -- set up before we got here */
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)		/* CFAR only exists on some CPUs */
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)	/* trap number stashed by the vector */
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE	/* set stack back-chain... */
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)		/* ...and NULL-terminate it */
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)	/* mark frame as exception */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b			/* loop in case it ever returns */