entry_64.S 38.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
14 15
 *
 * Normal syscalls and interrupts don't save a full stack frame, this is
L
Linus Torvalds 已提交
16
 * only done for syscall tracing, signals or fork/exec et.al.
17 18 19 20
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
L
Linus Torvalds 已提交
21
 * - partial stack frame: partially saved registers upto R11.
22
 * - full stack frame: Like partial stack frame, but all register saved.
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
L
Linus Torvalds 已提交
38 39 40 41 42 43 44 45
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
46
#include <asm/asm-offsets.h>
L
Linus Torvalds 已提交
47 48 49 50
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
51
#include <asm/page.h>
52
#include <asm/irqflags.h>
53
#include <asm/paravirt.h>
54
#include <asm/ftrace.h>
55
#include <asm/percpu.h>
L
Linus Torvalds 已提交
56

R
Roland McGrath 已提交
57 58 59 60 61 62
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

L
Linus Torvalds 已提交
63
	.code64
64
#ifdef CONFIG_FUNCTION_TRACER
65 66 67 68 69 70
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
71 72
	cmpl $0, function_trace_stop
	jne  ftrace_stub
73

74
	MCOUNT_SAVE_FRAME
75 76 77

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
78
	subq $MCOUNT_INSN_SIZE, %rdi
79 80 81 82 83

.globl ftrace_call
ftrace_call:
	call ftrace_stub

84
	MCOUNT_RESTORE_FRAME
85

86 87 88 89 90
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif
91 92 93 94 95 96 97

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
98
ENTRY(mcount)
99 100 101
	cmpl $0, function_trace_stop
	jne  ftrace_stub

102 103
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
104 105 106 107

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller
108 109 110

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
111 112
#endif

113 114 115 116 117
.globl ftrace_stub
ftrace_stub:
	retq

trace:
118
	MCOUNT_SAVE_FRAME
119 120 121

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
122
	subq $MCOUNT_INSN_SIZE, %rdi
123 124 125

	call   *ftrace_trace_function

126
	MCOUNT_RESTORE_FRAME
127 128 129

	jmp ftrace_stub
END(mcount)
130
#endif /* CONFIG_DYNAMIC_FTRACE */
131
#endif /* CONFIG_FUNCTION_TRACER */
132

133 134 135 136 137
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

138
	MCOUNT_SAVE_FRAME
139 140 141

	leaq 8(%rbp), %rdi
	movq 0x38(%rsp), %rsi
142
	subq $MCOUNT_INSN_SIZE, %rsi
143 144 145

	call	prepare_ftrace_return

146 147
	MCOUNT_RESTORE_FRAME

148 149 150 151 152 153 154 155
	retq
END(ftrace_graph_caller)


.globl return_to_handler
return_to_handler:
	subq  $80, %rsp

156 157 158 159 160 161 162
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)
163 164
	movq %r10, 56(%rsp)
	movq %r11, 64(%rsp)
165

166
	call ftrace_return_to_handler
167

168 169 170
	movq %rax, 72(%rsp)
	movq 64(%rsp), %r11
	movq 56(%rsp), %r10
171 172 173 174 175 176 177
	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
178 179 180
	addq $72, %rsp
	retq
#endif
181 182


183
#ifndef CONFIG_PREEMPT
L
Linus Torvalds 已提交
184
#define retint_kernel retint_restore_args
185
#endif
186

187
#ifdef CONFIG_PARAVIRT
188
ENTRY(native_usergs_sysret64)
189 190 191 192
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

193 194 195 196 197 198 199 200 201 202

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

L
Linus Torvalds 已提交
203
/*
204 205
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with an pt_regs argument is called from the SYSCALL based
L
Linus Torvalds 已提交
206 207 208
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
209 210 211
 */

	/* %rsp:at FRAMEEND */
212 213 214 215 216 217 218 219
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp  /* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
L
Linus Torvalds 已提交
220 221
	.endm

222 223 224 225 226
	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
L
Linus Torvalds 已提交
227 228 229 230
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
231
	xorl %eax, %eax
232
	pushq $__KERNEL_DS /* ss */
L
Linus Torvalds 已提交
233
	CFI_ADJUST_CFA_OFFSET	8
234
	/*CFI_REL_OFFSET	ss,0*/
L
Linus Torvalds 已提交
235 236
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
237
	CFI_REL_OFFSET	rsp,0
238
	pushq $X86_EFLAGS_IF /* eflags - interrupts on */
L
Linus Torvalds 已提交
239
	CFI_ADJUST_CFA_OFFSET	8
240
	/*CFI_REL_OFFSET	rflags,0*/
L
Linus Torvalds 已提交
241 242
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
243
	/*CFI_REL_OFFSET	cs,0*/
L
Linus Torvalds 已提交
244 245
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
246
	CFI_REL_OFFSET	rip,0
L
Linus Torvalds 已提交
247 248 249 250 251 252 253 254 255
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

256 257 258 259
/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro EMPTY_FRAME start=1 offset=0
260
	.if \start
261
	CFI_STARTPROC simple
262
	CFI_SIGNAL_FRAME
263
	CFI_DEF_CFA rsp,8+\offset
264
	.else
265
	CFI_DEF_CFA_OFFSET 8+\offset
266
	.endif
L
Linus Torvalds 已提交
267
	.endm
268 269

/*
270
 * initial frame state for interrupts (and exceptions without error code)
271
 */
272
	.macro INTR_FRAME start=1 offset=0
273 274 275 276 277 278
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
279 280 281 282 283 284
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
285
	.macro XCPT_FRAME start=1 offset=0
286
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
287 288 289 290 291 292 293
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
294 295 296 297 298 299 300 301 302 303
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
304 305 306 307 308 309
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
310
	PARTIAL_FRAME \start, R11+\offset-R15
311 312 313 314 315 316 317
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm
318 319 320 321 322

/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
I
Ingo Molnar 已提交
323 324 325 326 327 328 329 330 331 332
	movq_cfi rdi, RDI+16-ARGOFFSET
	movq_cfi rsi, RSI+16-ARGOFFSET
	movq_cfi rdx, RDX+16-ARGOFFSET
	movq_cfi rcx, RCX+16-ARGOFFSET
	movq_cfi rax, RAX+16-ARGOFFSET
	movq_cfi  r8,  R8+16-ARGOFFSET
	movq_cfi  r9,  R9+16-ARGOFFSET
	movq_cfi r10, R10+16-ARGOFFSET
	movq_cfi r11, R11+16-ARGOFFSET

333
	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
I
Ingo Molnar 已提交
334
	movq_cfi rbp, 8		/* push %rbp */
335 336 337 338 339 340 341 342 343 344 345 346
	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl %gs:pda_irqcount
	jne 2f
I
Ingo Molnar 已提交
347
	popq_cfi %rax			/* move return address... */
348
	mov %gs:pda_irqstackptr,%rsp
349
	EMPTY_FRAME 0
I
Ingo Molnar 已提交
350
	pushq_cfi %rax			/* ... to the new stack */
351 352 353 354 355 356 357 358
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

359 360 361 362 363 364 365 366 367 368 369 370 371 372 373
ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403
/* save complete stack frame */
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)

L
Linus Torvalds 已提交
404
/*
405 406 407
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
408
 */
L
Linus Torvalds 已提交
409
ENTRY(ret_from_fork)
410
	DEFAULT_FRAME
411

412
	push kernel_eflags(%rip)
413
	CFI_ADJUST_CFA_OFFSET 8
414
	popf					# reset kernel eflags
415
	CFI_ADJUST_CFA_OFFSET -8
416 417 418

	call schedule_tail			# rdi: 'prev' task parameter

L
Linus Torvalds 已提交
419
	GET_THREAD_INFO(%rcx)
420

421
	CFI_REMEMBER_STATE
L
Linus Torvalds 已提交
422
	RESTORE_REST
423 424

	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
L
Linus Torvalds 已提交
425
	je   int_ret_from_sys_call
426 427

	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
L
Linus Torvalds 已提交
428
	jnz  int_ret_from_sys_call
429

430
	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
431 432
	jmp ret_from_sys_call			# go to the SYSRET fastpath

433
	CFI_RESTORE_STATE
L
Linus Torvalds 已提交
434
	CFI_ENDPROC
435
END(ret_from_fork)
L
Linus Torvalds 已提交
436 437 438 439 440 441 442

/*
 * System call entry. Upto 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */
443

L
Linus Torvalds 已提交
444
/*
445
 * Register setup:
L
Linus Torvalds 已提交
446 447
 * rax  system call number
 * rdi  arg0
448
 * rcx  return address for syscall/sysret, C arg3
L
Linus Torvalds 已提交
449
 * rsi  arg1
450
 * rdx  arg2
L
Linus Torvalds 已提交
451 452 453 454
 * r10  arg3 	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
455 456
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
L
Linus Torvalds 已提交
457 458 459 460 461
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we haven't.
462 463 464 465
 *
 * When user can change the frames always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
466
 */
L
Linus Torvalds 已提交
467 468

ENTRY(system_call)
469
	CFI_STARTPROC	simple
470
	CFI_SIGNAL_FRAME
471
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
472 473
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
474 475 476 477 478 479 480 481
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

482
	movq	%rsp,%gs:pda_oldrsp
L
Linus Torvalds 已提交
483
	movq	%gs:pda_kernelstack,%rsp
484 485 486 487
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
488
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
489
	SAVE_ARGS 8,1
490
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
491 492
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
L
Linus Torvalds 已提交
493
	GET_THREAD_INFO(%rcx)
494
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
L
Linus Torvalds 已提交
495
	jnz tracesys
R
Roland McGrath 已提交
496
system_call_fastpath:
L
Linus Torvalds 已提交
497 498 499 500 501 502 503
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
504 505
 * Has incomplete stack frame and undefined top of stack.
 */
L
Linus Torvalds 已提交
506
ret_from_sys_call:
507
	movl $_TIF_ALLWORK_MASK,%edi
L
Linus Torvalds 已提交
508
	/* edi:	flagmask */
509
sysret_check:
510
	LOCKDEP_SYS_EXIT
L
Linus Torvalds 已提交
511
	GET_THREAD_INFO(%rcx)
512
	DISABLE_INTERRUPTS(CLBR_NONE)
513
	TRACE_IRQS_OFF
G
Glauber Costa 已提交
514
	movl TI_flags(%rcx),%edx
L
Linus Torvalds 已提交
515
	andl %edi,%edx
516
	jnz  sysret_careful
517
	CFI_REMEMBER_STATE
518 519 520 521
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
L
Linus Torvalds 已提交
522
	movq RIP-ARGOFFSET(%rsp),%rcx
523
	CFI_REGISTER	rip,rcx
L
Linus Torvalds 已提交
524
	RESTORE_ARGS 0,-ARG_SKIP,1
525
	/*CFI_REGISTER	rflags,r11*/
526
	movq	%gs:pda_oldrsp, %rsp
527
	USERGS_SYSRET64
L
Linus Torvalds 已提交
528

529
	CFI_RESTORE_STATE
L
Linus Torvalds 已提交
530
	/* Handle reschedules */
531
	/* edx:	work, edi: workmask */
L
Linus Torvalds 已提交
532 533 534
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
535
	TRACE_IRQS_ON
536
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
537
	pushq %rdi
538
	CFI_ADJUST_CFA_OFFSET 8
L
Linus Torvalds 已提交
539 540
	call schedule
	popq  %rdi
541
	CFI_ADJUST_CFA_OFFSET -8
L
Linus Torvalds 已提交
542 543
	jmp sysret_check

544
	/* Handle a signal */
L
Linus Torvalds 已提交
545
sysret_signal:
546
	TRACE_IRQS_ON
547
	ENABLE_INTERRUPTS(CLBR_NONE)
R
Roland McGrath 已提交
548 549 550 551
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
552
	/* edx:	work flags (arg3) */
L
Linus Torvalds 已提交
553 554
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
555 556 557 558 559
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call do_notify_resume
	RESTORE_TOP_OF_STACK %r11
	RESTORE_REST
560
	movl $_TIF_WORK_MASK,%edi
561 562
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
563
	DISABLE_INTERRUPTS(CLBR_NONE)
564
	TRACE_IRQS_OFF
565
	jmp int_with_check
566

567 568 569 570
badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

R
Roland McGrath 已提交
571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603
#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit.  Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

L
Linus Torvalds 已提交
604
	/* Do syscall tracing */
605
tracesys:
R
Roland McGrath 已提交
606 607 608 609
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
L
Linus Torvalds 已提交
610
	SAVE_REST
R
Roland McGrath 已提交
611
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
L
Linus Torvalds 已提交
612 613 614
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
615 616 617 618 619 620
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
L
Linus Torvalds 已提交
621 622
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
R
Roland McGrath 已提交
623
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
L
Linus Torvalds 已提交
624 625
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
R
Roland McGrath 已提交
626
	movq %rax,RAX-ARGOFFSET(%rsp)
627
	/* Use IRET because user could have changed frame */
628 629

/*
L
Linus Torvalds 已提交
630 631
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
632 633
 */
	.globl int_ret_from_sys_call
634
	.globl int_with_check
635
int_ret_from_sys_call:
636
	DISABLE_INTERRUPTS(CLBR_NONE)
637
	TRACE_IRQS_OFF
L
Linus Torvalds 已提交
638 639 640 641 642
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
643
	LOCKDEP_SYS_EXIT_IRQ
L
Linus Torvalds 已提交
644
	GET_THREAD_INFO(%rcx)
G
Glauber Costa 已提交
645
	movl TI_flags(%rcx),%edx
L
Linus Torvalds 已提交
646 647
	andl %edi,%edx
	jnz   int_careful
G
Glauber Costa 已提交
648
	andl    $~TS_COMPAT,TI_status(%rcx)
L
Linus Torvalds 已提交
649 650 651 652 653 654 655 656
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
657
	TRACE_IRQS_ON
658
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
659
	pushq %rdi
660
	CFI_ADJUST_CFA_OFFSET 8
L
Linus Torvalds 已提交
661 662
	call schedule
	popq %rdi
663
	CFI_ADJUST_CFA_OFFSET -8
664
	DISABLE_INTERRUPTS(CLBR_NONE)
665
	TRACE_IRQS_OFF
L
Linus Torvalds 已提交
666 667 668 669
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
670
	TRACE_IRQS_ON
671
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
672
	SAVE_REST
673
	/* Check for syscall exit trace */
674
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
L
Linus Torvalds 已提交
675 676
	jz int_signal
	pushq %rdi
677
	CFI_ADJUST_CFA_OFFSET 8
678
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
L
Linus Torvalds 已提交
679 680
	call syscall_trace_leave
	popq %rdi
681
	CFI_ADJUST_CFA_OFFSET -8
682
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
L
Linus Torvalds 已提交
683
	jmp int_restore_rest
684

L
Linus Torvalds 已提交
685
int_signal:
P
Peter Zijlstra 已提交
686
	testl $_TIF_DO_NOTIFY_MASK,%edx
L
Linus Torvalds 已提交
687 688 689 690
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
R
Roland McGrath 已提交
691
1:	movl $_TIF_WORK_MASK,%edi
L
Linus Torvalds 已提交
692 693
int_restore_rest:
	RESTORE_REST
694
	DISABLE_INTERRUPTS(CLBR_NONE)
695
	TRACE_IRQS_OFF
L
Linus Torvalds 已提交
696 697
	jmp int_with_check
	CFI_ENDPROC
698
END(system_call)
699 700

/*
L
Linus Torvalds 已提交
701
 * Certain special system calls that need to save a complete full stack frame.
702
 */
L
Linus Torvalds 已提交
703
	.macro PTREGSCALL label,func,arg
704 705 706 707 708 709 710 711 712 713
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg	/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
714
END(\label)
L
Linus Torvalds 已提交
715 716 717 718 719 720 721 722 723
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
724 725 726 727 728 729 730 731 732
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
L
Linus Torvalds 已提交
733
	CFI_ENDPROC
734
END(ptregscall_common)
735

L
Linus Torvalds 已提交
736 737 738
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
739 740
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
L
Linus Torvalds 已提交
741 742
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
743
	movq %rsp, %rcx
L
Linus Torvalds 已提交
744 745 746 747 748 749
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
750
END(stub_execve)
751

L
Linus Torvalds 已提交
752 753 754
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
755
 */
L
Linus Torvalds 已提交
756 757
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
758 759
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
L
Linus Torvalds 已提交
760 761 762 763 764 765 766 767
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
768
END(stub_rt_sigreturn)
L
Linus Torvalds 已提交
769

770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
787
      .if vector <> FIRST_EXTERNAL_VECTOR
788 789 790 791
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
792
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

810
/*
L
Linus Torvalds 已提交
811 812 813
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
814 815 816
 *
 * Entry runs with interrupts off.
 */
L
Linus Torvalds 已提交
817

818
/* 0(%rsp): ~(interrupt number) */
L
Linus Torvalds 已提交
819
	.macro interrupt func
820 821 822
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
823
	PARTIAL_FRAME 0
L
Linus Torvalds 已提交
824 825 826
	call \func
	.endm

827 828 829 830
	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
831 832
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
833
	XCPT_FRAME
834
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
L
Linus Torvalds 已提交
835 836
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
837
ret_from_intr:
838
	DISABLE_INTERRUPTS(CLBR_NONE)
839
	TRACE_IRQS_OFF
840
	decl %gs:pda_irqcount
841
	leaveq
842
	CFI_DEF_CFA_REGISTER	rsp
843
	CFI_ADJUST_CFA_OFFSET	-8
844
exit_intr:
L
Linus Torvalds 已提交
845 846 847
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel
848

L
Linus Torvalds 已提交
849 850 851 852
	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
853
	 */
L
Linus Torvalds 已提交
854 855
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
856
retint_check:
857
	LOCKDEP_SYS_EXIT_IRQ
G
Glauber Costa 已提交
858
	movl TI_flags(%rcx),%edx
L
Linus Torvalds 已提交
859
	andl %edi,%edx
860
	CFI_REMEMBER_STATE
L
Linus Torvalds 已提交
861
	jnz  retint_careful
862 863

retint_swapgs:		/* return to user-space */
864 865 866
	/*
	 * The iretq could re-enable interrupts:
	 */
867
	DISABLE_INTERRUPTS(CLBR_ANY)
868
	TRACE_IRQS_IRETQ
869
	SWAPGS
870 871
	jmp restore_args

872
retint_restore_args:	/* return to kernel space */
873
	DISABLE_INTERRUPTS(CLBR_ANY)
874 875 876 877 878
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
I
Ingo Molnar 已提交
879 880
	RESTORE_ARGS 0,8,0

A
Adrian Bunk 已提交
881
irq_return:
882
	INTERRUPT_RETURN
I
Ingo Molnar 已提交
883 884 885 886 887 888

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
889
ENTRY(native_iret)
L
Linus Torvalds 已提交
890 891 892
	iretq

	.section __ex_table,"a"
893
	.quad native_iret, bad_iret
L
Linus Torvalds 已提交
894
	.previous
I
Ingo Molnar 已提交
895 896
#endif

L
Linus Torvalds 已提交
897 898
	.section .fixup,"ax"
bad_iret:
899 900 901 902 903 904 905 906 907 908 909 910 911 912 913
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0	/* fake error code for the pretended #GPF */

	SWAPGS
	jmp general_protection
914 915
	.previous

916
	/* edi: workmask, edx: work */
L
Linus Torvalds 已提交
917
retint_careful:
918
	CFI_RESTORE_STATE
L
Linus Torvalds 已提交
919 920
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
921
	TRACE_IRQS_ON
922
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
923
	pushq %rdi
924
	CFI_ADJUST_CFA_OFFSET	8
L
Linus Torvalds 已提交
925
	call  schedule
926
	popq %rdi
927
	CFI_ADJUST_CFA_OFFSET	-8
L
Linus Torvalds 已提交
928
	GET_THREAD_INFO(%rcx)
929
	DISABLE_INTERRUPTS(CLBR_NONE)
930
	TRACE_IRQS_OFF
L
Linus Torvalds 已提交
931
	jmp retint_check
932

L
Linus Torvalds 已提交
933
retint_signal:
P
Peter Zijlstra 已提交
934
	testl $_TIF_DO_NOTIFY_MASK,%edx
935
	jz    retint_swapgs
936
	TRACE_IRQS_ON
937
	ENABLE_INTERRUPTS(CLBR_NONE)
L
Linus Torvalds 已提交
938
	SAVE_REST
939
	movq $-1,ORIG_RAX(%rsp)
940
	xorl %esi,%esi		# oldset
L
Linus Torvalds 已提交
941 942 943
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
944
	DISABLE_INTERRUPTS(CLBR_NONE)
945
	TRACE_IRQS_OFF
946
	GET_THREAD_INFO(%rcx)
R
Roland McGrath 已提交
947
	jmp retint_with_reschedule
L
Linus Torvalds 已提交
948 949 950 951

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
952
ENTRY(retint_kernel)
G
Glauber Costa 已提交
953
	cmpl $0,TI_preempt_count(%rcx)
L
Linus Torvalds 已提交
954
	jnz  retint_restore_args
G
Glauber Costa 已提交
955
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
L
Linus Torvalds 已提交
956 957 958 959 960
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
961
#endif
962

L
Linus Torvalds 已提交
963
	CFI_ENDPROC
964
END(common_interrupt)
965

L
Linus Torvalds 已提交
966 967
/*
 * APIC interrupts.
968
 */
969 970
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
971
	INTR_FRAME
972
	pushq $~(\num)
973
	CFI_ADJUST_CFA_OFFSET 8
974
	interrupt \do_sym
L
Linus Torvalds 已提交
975 976
	jmp ret_from_intr
	CFI_ENDPROC
977 978
END(\sym)
.endm
L
Linus Torvalds 已提交
979

980 981 982 983
#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
#endif
L
Linus Torvalds 已提交
984

985
apicinterrupt UV_BAU_MESSAGE \
986 987 988
	uv_bau_message_intr1 uv_bau_message_interrupt
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
989

990
#ifdef CONFIG_SMP
991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
	invalidate_interrupt0 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
	invalidate_interrupt1 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
	invalidate_interrupt2 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
	invalidate_interrupt3 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
	invalidate_interrupt4 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
	invalidate_interrupt5 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
	invalidate_interrupt6 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
	invalidate_interrupt7 smp_invalidate_interrupt
L
Linus Torvalds 已提交
1007 1008
#endif

1009 1010 1011 1012
apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt mce_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt
1013

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

/*
 * Exception entry points.
 */
/*
 * Exception entry for vectors that do NOT push a hardware error code.
 * Pushes -1 as ORIG_RAX (so signal code never restarts a "syscall"),
 * saves the partial frame via error_entry and calls \do_sym(regs, 0).
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

/*
 * Like zeroentry, but uses save_paranoid/paranoid_exit so the handler
 * is safe even when GS base state cannot be trusted (e.g. NMI-like
 * contexts); no error code is passed to \do_sym.
 */
.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

/*
 * Paranoid zero-error-code entry running on an IST stack.  The per-CPU
 * TSS IST slot \ist is temporarily lowered by EXCEPTION_STKSZ around the
 * handler call so a recursive exception of the same kind gets a fresh
 * stack instead of clobbering the active frame.
 */
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	PER_CPU(init_tss, %rbp)
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	call \do_sym
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

/*
 * Exception entry for vectors where the CPU pushed a hardware error
 * code: the code is fetched from the ORIG_RAX slot into %rsi and the
 * slot is overwritten with -1 so it cannot look like a restartable
 * syscall number.
 */
.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

	/* error code is on the stack already */
1103
.macro paranoiderrorentry sym do_sym
1104
ENTRY(\sym)
1105 1106 1107
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
1108 1109 1110
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
1111
	TRACE_IRQS_OFF
1112 1113 1114
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1115
	call \do_sym
1116 1117
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
1118 1119 1120 1121 1122 1123 1124 1125
END(\sym)
.endm

/* Instantiate the standard exception entry points. */
zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error

	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS			/* work on the user GS base while in kernel */
gs_change:
	movl %edi,%gs		/* may fault: fixed up via __ex_table -> bad_gs */
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
END(native_load_gs_index)

1153 1154 1155 1156 1157
	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
L
Linus Torvalds 已提交
1158
	/* running with kernelgs */
1159
bad_gs:
1160
	SWAPGS			/* switch back to user gs */
L
Linus Torvalds 已提交
1161
	xorl %eax,%eax
1162 1163 1164
	movl %eax,%gs
	jmp  2b
	.previous
1165


/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth to check for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning, this avoids the need
	 * of hacks for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_thread)


/* Trampoline the new kernel thread starts on: calls fn(arg), then do_exit. */
ENTRY(child_rip)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
END(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fallback into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call	/* success: leave via IRET path */
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_execve)

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp	/* switch stacks only when not nested */
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/*
 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp	/* switch to irq stack unless nested */
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check do_machine_check
#endif

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */

	/* ebx:	no swapgs flag */
ENTRY(paranoid_exit)
	INTR_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz   paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK
paranoid_restore:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
	CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * returns in "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi  r8,  R8+8
	movq_cfi  r9,  R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report an truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
END(error_entry)


/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax		/* preserve the flag: RESTORE_REST reloads %rbx */
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel	/* flag set: came from kernel, skip user work */
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful	/* pending work (signals etc.): slow path */
	jmp retint_swapgs
	CFI_ENDPROC
END(error_exit)

	/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1
	subq $15*8, %rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi
	movq $-1,%rsi
	call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
	/* paranoidexit; without TRACE_IRQS_OFF */
	/* ebx:	no swapgs flag */
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl %ebx,%ebx				/* swapgs needed? */
	jnz nmi_restore
	testl $3,CS(%rsp)
	jnz nmi_userspace
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_ALL 8
	jmp irq_return
nmi_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz nmi_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz nmi_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp nmi_userspace
nmi_schedule:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp nmi_userspace
	CFI_ENDPROC
#else
	jmp paranoid_exit
	CFI_ENDPROC
#endif
END(nmi)

/* Minimal stub: report -ENOSYS in %eax and return via sysret. */
ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
	.popsection