/*
 *  PowerPC version 
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
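	/*
	 * On entry (set up by the system call exception prologue):
	 * r0 holds the syscall number, r3-r8 the arguments, r9 the
	 * caller's r13, r11/r12 the saved SRR0/SRR1 (NIP/MSR), and
	 * r13 the PACA.
	 */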
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled,
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
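	/* thread_info lives at the base of the kernel stack, so
	 * clearing the low THREAD_SHIFT bits of r1 yields
	 * current_thread_info(). */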
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
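	/* Each table slot is a pair of doublewords (the 64-bit entry,
	 * then the 32-bit entry), so scale the syscall number by 16. */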
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
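	/* An unsigned compare against -_LAST_ERRNO (in r11) catches
	 * return values in the error range [-_LAST_ERRNO, -1]. */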
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:	
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont
	
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
	
syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c 
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
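	/* Collect in r0 the facility bits (FP, plus VSX/Altivec below)
	 * to turn off in the MSR across the switch, so the new task
	 * faults them back in on first use. */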
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	ld	r0,THREAD_DSCR(r4)
	cmpd	r0,r25
	beq	1f
	mtspr	SPRN_DSCR,r0
1:	
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif /* !CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path, we first check if we
	 * have to change our interrupt state.
	 */
	ld	r5,SOFTE(r1)
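	/* r5 = soft-enable state to restore, r6 = current soft-enable state */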
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr1,r5,0
	cmpw	cr0,r5,r6
	beq	cr0,4f

	/* We do, handle disable first, which is easy */
	bne	cr1,3f
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	TRACE_DISABLE_INTS
	b	4f

3:	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)

	/*
	 * Final return path. BookE is handled in a different file
	 */
4:
#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per-cpu area; only restore it if we are returning
	 * to userspace, since the value stored in the stack frame may
	 * belong to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	.__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.timer_interrupt
	b	.ret_from_except
#ifdef CONFIG_PPC_BOOK3E
1:	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.doorbell_exception
	b	.ret_from_except
#endif /* CONFIG_PPC_BOOK3E */
1:	b	.ret_from_except /* What else to do here? */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 * 
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG	
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	
	/* Hard-disable interrupts */
	mfmsr	r6
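	/* Clear MSR_EE by rotating it up to the MSB, masking it off,
	 * and rotating back (48 + 16 = 64 bits). */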
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

852 853 854 855 856 857 858 859
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Set up our real return address */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0
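	/* Build in r9 the bits RTAS must run without: 64-bit mode (SF),
	 * translation (IR/DR), FP and its exception modes, and RI.
	 * r6 then becomes the 32-bit real-mode MSR for RTAS. */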
	
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
	
	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch the MSR to 32-bit mode */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	
	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
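	/* r3 = address of the mcount call site in the traced function;
	 * the caller's frame (r11) holds the parent's return address in
	 * its LR save slot at offset 16, loaded into r4 below. */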
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */