/*
 *  PowerPC version 
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
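	/*
	 * On entry from the 0xc00 system call vector: r0 is the syscall
	 * number, r3-r8 are the arguments, r9 holds the caller's r13,
	 * r11/r12 hold SRR0/SRR1 (the caller's NIP and MSR) and r13 points
	 * at the PACA, which is what the stores below rely on.
	 */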
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that condition
	 * is correct
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
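	/*
	 * mtmsrd with L=1 only alters MSR[EE] and MSR[RI], so this single
	 * instruction re-enables interrupts and marks us as recoverable.
	 */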
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
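	/*
	 * Each sys_call_table slot is a pair of 8-byte entries (the 64-bit
	 * handler followed by the 32-bit one), hence the scaling by 16.
	 */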
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	.Lunrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

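	/*
	 * If the MSR we are returning with no longer has FP (or, with
	 * ALTIVEC, VEC) enabled, let restore_math reload that state
	 * before we leave.
	 */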
	andi.	r0,r8,MSR_FP
	beq 2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit
	
.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12

	rfid
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
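/* The low bit of the _TRAP word in the exception frame is set while the
 * non-volatile GPRs have not been saved, and cleared once they have. */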
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);

	
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
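	/*
	 * The child of fork/clone resumes here; finish the context switch
	 * in schedule_tail() and return 0 to the new task.
	 */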
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
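	/*
	 * r14 holds the thread function and r15 its argument (set up when
	 * the kernel thread was created); the ELFv2 ABI also wants the
	 * callee's entry point in r12 so it can establish its TOC.
	 */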
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c 
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	.Lrestore_irq_off

	/* We are enabling; if we were already enabled, just return */
	cmpwi	cr0,r6,1
	beq	cr0,.Ldo_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay
 
	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	*/
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
 	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */
 
.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 * 
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
        stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
   	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG	
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	
	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
         */
	std	r1,PACAR1(r13)
        std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */	
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
       	mtlr	r4

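	/*
	 * Build two MSR values: r0 is the current MSR with EE/SE/BE/RI
	 * cleared (used below to hard-disable before the rfid), and r6 is
	 * the MSR RTAS runs with: 32-bit, real mode, big endian, FP off.
	 */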
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0
	
        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
	
	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

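	/*
	 * Pick up the address of rtas_restore_regs from the literal at 1:
	 * below, addressed PC-relative since we are running here with
	 * relocation off.
	 */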
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync	
	mtmsrd  r6
        
        ld	r1,PACAR1(r4)           /* Restore our SP */
        ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

        addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr    r0
        blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
   	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
       	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupt by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
        isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	
        addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr    r0
        blr