/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
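
/*
 * Illustrative sketch of the "top of stack" terminology above (a memory
 * layout note, not kernel code): the architecture-defined interrupt frame
 * as the CPU pushes it on a trap from user mode, highest address first:
 *
 *	SS
 *	RSP		(user stack pointer)
 *	RFLAGS
 *	CS
 *	RIP		<- lowest address of the hardware frame
 *	[error code]	(pushed only by some exceptions)
 */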

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

	.code64

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)

	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl mcount_call
mcount_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif	
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
		
	/* %rsp:at FRAMEEND */ 
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq  	\tmp,RSP(%rsp)
	movq    $__USER_DS,SS(%rsp)
	movq    $__USER_CS,CS(%rsp)
	movq 	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp  /* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq   RSP-\offset(%rsp),\tmp
	movq   \tmp,%gs:pda_oldrsp
	movq   EFLAGS-\offset(%rsp),\tmp
	movq   \tmp,R11-\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */ 	
/* rdi:	prev */	
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:	
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)	
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
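
/*
 * Illustrative sketch (assumed user-space code, not part of this file):
 * how a system call is issued under the register convention above,
 * e.g. write(1, buf, len); "buf" and "len" are placeholders.
 *
 *	movl	$__NR_write,%eax	# system call number
 *	movl	$1,%edi			# arg0: fd
 *	leaq	buf(%rip),%rsi		# arg1: buffer
 *	movl	$len,%edx		# arg2: count
 *	syscall				# rcx/r11 are clobbered by the CPU
 *	# the return value (or -errno) comes back in rax
 */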

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp 
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack. 
 */		
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:		
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful 
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64
	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */	
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */ 
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	
badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit.  Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:			 
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
		
/* 
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */	
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest
	
int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
		
/*
 * Certain special system calls need to save a complete full stack frame.
 */
	
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
	
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)
	
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */                
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */ 
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel
	
	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */		
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi		
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check
	
retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp) 			
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif	
	CFI_ENDPROC
END(common_interrupt)
	
/*
 * APIC interrupts.
 */		
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)
ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)
#ifdef CONFIG_SMP	
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)
	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt	
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)
ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
				
/*
 * Exception entry points.
 */ 		
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */ 
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */ 
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm	

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
 	 * "Paranoid" exit path from exception stack.
  	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
1005
	ENABLE_INTERRUPTS(CLBR_ANY)
1006
	call schedule
1007
	DISABLE_INTERRUPTS(CLBR_ANY)
1008 1009 1010 1011 1012 1013 1014
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.	
 */ 		  				
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld	
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */ 
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp) 
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp) 
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp) 
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp) 
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp) 
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp) 
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx	
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:	
	SWAPGS
error_sti:	
	movq %rdi,RDI(%rsp) 	
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */ 
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)	
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl  TI_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
       /* There are two places in the kernel that can potentially fault with
          usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
        je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	
       /* Reload gs selector with exception handling */
       /* edi:  new selector */ 
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:     
        movl %edi,%gs   
2:	mfence		/* workaround */
	SWAPGS
        popf
	CFI_ADJUST_CFA_OFFSET -8
        ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)
       
        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
	/* running with kernelgs */
bad_gs: 
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous       
	
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
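/*
 * Illustrative C-side usage sketch (hypothetical caller, assuming the
 * prototype above and CLONE_* flags from <linux/sched.h>):
 *
 *	static int worker(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */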
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d
	
	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)
	
child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL	
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)	
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error	
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
 	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8		
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */	
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
 	CFI_ENDPROC
#endif
KPROBE_END(nmi)
KPROBE_ENTRY(int3)
 	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_int3, DEBUG_STACK
 	jmp paranoid_exit1
 	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op	
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)
KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8	
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */