/*
 *  PowerPC version 
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
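	/*
	 * On entry from the exception prolog (as the stores below assume):
	 *   r0       syscall number
	 *   r3-r8    syscall arguments
	 *   r9       saved user r13
	 *   r11,r12  caller's NIP and MSR (from SRR0/SRR1)
	 *   r13      PACA pointer
	 */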
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
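	/* (the DTL is the hypervisor's dispatch trace log; unprocessed
	 *  entries represent stolen time to be accounted below) */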
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
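/*
 * Each sys_call_table slot is a pair of 8-byte entries (the 64-bit
 * handler, then the 32-bit handler), hence the 16-byte stride below
 * and the 8-byte offset used to select the 32-bit column.
 */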
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */

syscall_exit:
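	/* r3 now holds the handler's return value */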
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
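	/*
	 * Clear the reservation only on CPUs that do not check the
	 * address on stcx.; where the address is checked, a stale
	 * kernel-stack reservation can never match a user larx.
	 */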
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:	
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont
	
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)
	b	.Lsyscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
	
syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
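/*
 * The lowest bit of the saved trap number is set while the non-volatile
 * GPRs are not in the frame (see FULL_REGS()); clear it once they are.
 */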
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

	
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	REST_GPR(2,r1)
	mtlr	r14
	mr	r3,r15
	blrl
	li	r3,0
	b	.do_exit	# no return

_GLOBAL(__ret_from_kernel_execve)
	addi	r1,r3,-STACK_FRAME_OVERHEAD
	li	r10,1
	std	r10,SOFTE(r1)
	b	syscall_exit

	.section	".toc","aw"
DSCR_DEFAULT:
	.tc dscr_default[TC],dscr_default

	.section	".text"

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
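	/*
	 * Bolt an SLB entry for the new kernel stack, unless its segment
	 * is the always-bolted first kernel segment or the same segment
	 * as the current stack, so stack accesses never take an SLB miss.
	 */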
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r7,DSCR_DEFAULT@toc(2)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,0(r7)
1:	cmpd	r0,r25
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	beq	restore

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r8,TI_FLAGS(r9)
	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test flags and loop again if needed */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling; were we already enabled? If so, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of the rfi.
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per-CPU area; only restore it if we are returning to
	 * userspace, as the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	.__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.do_IRQ
	b	.ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.timer_interrupt
	b	.ret_from_except
#ifdef CONFIG_PPC_BOOK3E
1:	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.doorbell_exception
	b	.ret_from_except
#endif /* CONFIG_PPC_BOOK3E */
1:	b	.ret_from_except /* What else to do here? */

852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890
unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 * 
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
        stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
   	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

891 892 893 894 895 896
	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG	
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	
	/* Hard-disable interrupts */
	mfmsr	r6
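	/* rotate MSR_EE up to the MSB, mask it off, then rotate back */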
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
         */
	std	r1,PACAR1(r13)
        std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */	
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
       	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0
	
        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
	
	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

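	/* bcl 20,31 puts the address of the next instruction into LR */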
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync	
	mtmsrd  r6
        
        ld	r1,PACAR1(r4)           /* Restore our SP */
        ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

        addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr    r0
        blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
   	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
        mfmsr   r11
        li      r12,1
        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        andc    r11,r11,r12
        li      r12,1
        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        andc    r11,r11,r12
        mtmsrd  r11
#endif /* CONFIG_PPC_BOOK3E */
        isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
        isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	
        addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr    r0
        blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
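	/* the "bl ftrace_stub" below is patched at runtime to call the tracer */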
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
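/*
 * Like ftrace_call above, this branch is patched at runtime: it is
 * redirected to ftrace_graph_caller when the graph tracer is enabled.
 */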
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16
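	/* the parent's return address sits in the LR save slot,
	 * 16 bytes into the previous frame (ppc64 ABI) */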

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */