/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 * 
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 * 
 * A note on terminology:	 
 * - top of stack: Architecture defined interrupt frame from SS to RIP 
 * at the top of the kernel process stack.	
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

	.code64

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
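	/*
	 * function_trace_stop is set when function tracing must not run
	 * (e.g. while the tracer is being reconfigured); bail out early.
	 */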
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi
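	/*
	 * %rdi now points at the mcount call site inside the traced function;
	 * %rsi (loaded from 8(%rbp) above) is the traced function's own
	 * return address, i.e. its caller.
	 */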

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_PREEMPT
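/* Without kernel preemption a return to kernel space never reschedules. */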
#define retint_kernel retint_restore_args
#endif	

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */        	
		
	/* %rsp:at FRAMEEND */ 
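	/*
	 * The syscall fast path leaves the user %rsp in the PDA (pda_oldrsp),
	 * the user %rip in %rcx and the user eflags in %r11.  Copy those into
	 * the pt_regs slots and force user CS/SS so that C code sees a
	 * complete, consistent frame.
	 */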
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq  	\tmp,RSP(%rsp)
	movq    $__USER_DS,SS(%rsp)
	movq    $__USER_CS,CS(%rsp)
	movq 	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp  /* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq   RSP-\offset(%rsp),\tmp
	movq   \tmp,%gs:pda_oldrsp
	movq   EFLAGS-\offset(%rsp),\tmp
	movq   \tmp,R11-\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */ 	
/* rdi:	prev */	
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:	
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)	
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */
		
/*
 * Register setup:	
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3 
 * rsi  arg1
 * rdx  arg2	
 * r10  arg3 	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched. 		
 * 
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */ 			 		
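
/*
 * Illustration only (not part of the kernel): a user-space write(2) under
 * this convention could look like
 *	movq $1,%rax		# __NR_write
 *	movq $1,%rdi		# fd 1 (stdout)
 *	movq $buf,%rsi		# buffer  (hypothetical symbol)
 *	movq $len,%rdx		# count   (hypothetical symbol)
 *	syscall			# %rcx/%r11 are clobbered by the CPU
 */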

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp 
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack. 
 */		
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:		
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful 
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */	
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */ 
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	
badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit.  Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:			 
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
		
/* 
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */	
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest
	
int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
		
/* 
 * Certain special system calls that need to save a complete full stack frame.
 */ 								
	
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
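	/*
	 * Common tail of the PTREGSCALL stubs: move the return address out of
	 * the way, build a full frame with SAVE_REST/FIXUP_TOP_OF_STACK and
	 * call the handler whose address the stub loaded into %rax.
	 */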
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
	
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)
	
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */                
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/* 
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *	
 * Entry runs with interrupts off.	
 */ 

/* 0(%rsp): interrupt number */ 
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	movq %rbp, -8(%rsp)
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel
	
	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */		
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi		
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check
	
retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp) 			
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif	

	CFI_ENDPROC
END(common_interrupt)
	
/*
 * APIC interrupts.
 */		
	.macro apicinterrupt num,func
	INTR_FRAME
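	/*
	 * The vector is pushed in one's-complement form so the common code
	 * can tell it apart from a non-negative syscall number in orig_rax;
	 * do_IRQ undoes the complement.
	 */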
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP	
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt	
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
				
/*
 * Exception entry points.
 */ 		
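/*
 * zeroentry is for exceptions that push no error code: a zero is pushed so
 * the frame matches errorentry.  Both pass the C handler's address to
 * error_entry in %rax.
 */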
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */ 
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */ 
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm	

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
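	/*
	 * MSR_GS_BASE is negative (a kernel address) if we already run with
	 * the kernel GS, e.g. when the exception hit kernel code.  %ebx
	 * records whether swapgs is needed on exit: 1 = no, 0 = yes.
	 */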
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
 	 * "Paranoid" exit path from exception stack.
  	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.	
 */ 		  				
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld	
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */ 
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp) 
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp) 
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp) 
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp) 
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp) 
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp) 
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx	
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:	
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp) 	
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */ 
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)	
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl  TI_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
       /* There are two places in the kernel that can potentially fault with
          usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
        je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	
       /* Reload gs selector with exception handling */
       /* edi:  new selector */ 
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:     
        movl %edi,%gs   
2:	mfence		/* workaround */
	SWAPGS
        popf
	CFI_ADJUST_CFA_OFFSET -8
        ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)
       
        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
	/* running with kernelgs */
bad_gs: 
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous       
	
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d
	
	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]	
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)
	
child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL	
1200
	movq %rsp,%rcx
L
	movq %rax, RAX(%rsp)	
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
1210
ENDPROC(kernel_execve)
L
1212
KPROBE_ENTRY(page_fault)
L
1214
KPROBE_END(page_fault)
L
ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
1218
END(coprocessor_error)
L
ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error	
1222
END(simd_coprocessor_error)
L
ENTRY(device_not_available)
1225
	zeroentry do_device_not_available
1226
END(device_not_available)
L
	/* runs on exception stack */
1229
KPROBE_ENTRY(debug)
1230
 	INTR_FRAME
1231
	PARAVIRT_ADJUST_EXCEPTION_FRAME
L
	CFI_ADJUST_CFA_OFFSET 8		
1234
	paranoidentry do_debug, DEBUG_STACK
1235
	paranoidexit
1236
KPROBE_END(debug)
L
	/* runs on exception stack */	
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
 	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
 	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_int3, DEBUG_STACK
 	jmp paranoid_exit1
 	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op	
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8	
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
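	/*
	 * pda_irqcount is -1 while we are not on the IRQ stack; the incl then
	 * yields zero and the cmove below switches %rsp to the per-CPU IRQ
	 * stack.
	 */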
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)
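
/*
 * ignore_sysret is used as the 32-bit SYSCALL (CSTAR) entry when ia32
 * emulation is not configured; a stray 32-bit SYSCALL then just fails
 * with -ENOSYS.
 */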

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */