/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like the partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

	.code64

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
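
/*
 * Note (illustrative): bit 9 of the saved EFLAGS image is IF, so the
 * "bt $9" test in TRACE_IRQS_IRETQ above checks whether the frame we are
 * about to return to had interrupts enabled; only in that case is
 * TRACE_IRQS_ON emitted, keeping lockdep's irq-state tracking in sync with
 * what iretq/sysretq will actually restore.
 */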

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
	/* %rsp:at FRAMEEND */ 
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq  	\tmp,RSP(%rsp)
	movq    $__USER_DS,SS(%rsp)
	movq    $__USER_CS,CS(%rsp)
	movq 	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp  /* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq   RSP-\offset(%rsp),\tmp
	movq   \tmp,%gs:pda_oldrsp
	movq   EFLAGS-\offset(%rsp),\tmp
	movq   \tmp,R11-\offset(%rsp)
	.endm
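
	/*
	 * Typical pairing (sketch, mirroring stub_execve below): the SYSCALL
	 * fast path never filled the RSP/SS/CS/EFLAGS slots of pt_regs, so
	 * before a C function gets a pt_regs pointer the stubs do
	 *
	 *	FIXUP_TOP_OF_STACK %r11
	 *	call  sys_foo			# hypothetical ptregs-taking call
	 *	RESTORE_TOP_OF_STACK %r11
	 *
	 * so that any ptrace/signal rewrite of the frame is propagated back to
	 * the state that sysret will actually use.
	 */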

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
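
	/*
	 * Layout note: FAKE_STACK_FRAME pushes six quadwords (ss, rsp, eflags,
	 * cs, rip, orig_rax), which is why UNFAKE_STACK_FRAME simply drops
	 * 8*6 bytes again.
	 */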

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	CFI_REMEMBER_STATE
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
	CFI_RESTORE_STATE
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
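
/*
 * Worked example (illustrative, not part of the build): for a hypothetical
 * 4-argument call long sys_foo(a0, a1, a2, a3), user space issues SYSCALL
 * with the number in rax and a0..a3 in rdi, rsi, rdx, r10; the
 * "movq %r10,%rcx" on the fast path below is what converts that into the
 * C calling convention (rdi, rsi, rdx, rcx) before the sys_call_table
 * dispatch.
 */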

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath
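
	/*
	 * For reference (paraphrased from the register setup above, not an
	 * exact prototype): the sequence amounts to a C call of the shape
	 *	audit_syscall_entry(AUDIT_ARCH_X86_64, nr, arg1, arg2, arg3, arg4)
	 * after which LOAD_ARGS restores the clobbered argument registers
	 * from pt_regs and we rejoin the fast path.
	 */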

	/*
	 * Return fast path for syscall audit.  Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
		
/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
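
/*
 * Expansion sketch (illustrative): "PTREGSCALL stub_fork, sys_fork, %rdi"
 * emits a global stub_fork that loads the address of sys_fork into %rax,
 * points the chosen argument register at the pt_regs area on the stack,
 * and jumps to ptregscall_common below, which completes the frame around
 * the actual call.
 */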

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	movq %rbp, -8(%rsp)
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
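
/*
 * Note (sketch of the intent, assuming pda_irqcount is initialized to -1 by
 * the PDA setup code): the incl in the macro above leaves ZF set only for
 * the first, outermost interrupt level, so the cmoveq switches %rsp to the
 * per-CPU IRQ stack exactly once; nested interrupts keep running on the
 * stack they arrived on, and ret_from_intr's decl undoes the count.
 */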

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
	
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm
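
/*
 * For example, "apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt"
 * below expands to: set up INTR_FRAME, push the complemented vector number
 * into the orig_rax slot, run the common "interrupt" prologue, call
 * smp_thermal_interrupt, and return through ret_from_intr.
 */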

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
				
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
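
	/*
	 * The difference between the two macros above: zeroentry is for
	 * exceptions where the CPU pushes no error code, so a 0 is pushed by
	 * hand to keep the frame layout uniform; errorentry is for exceptions
	 * that arrive with a hardware-pushed error code already in that slot.
	 * Both end up in error_entry with the handler address in %rax.
	 */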

	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
 	 * "Paranoid" exit path from exception stack.
  	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl  TI_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	
	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:
        movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
        popf
	CFI_ADJUST_CFA_OFFSET -8
        ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous       
	
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
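/*
 * Usage sketch (illustrative only, not part of this file):
 *
 *	static int my_worker(void *arg);	(hypothetical helper)
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * The flags are OR'ed with kernel_thread_flags and handed to do_fork();
 * the child starts in child_rip below, which calls fn(arg) and then
 * do_exit() with its return value.
 */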
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d
	
	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */