/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

	.code64
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
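	/*
	 * Save the call-clobbered argument registers, then hand the tracer:
	 * %rdi - the address of the mcount call site inside the traced
	 *        function (taken from 0x38(%rsp) and moved back by
	 *        MCOUNT_INSN_SIZE below)
	 * %rsi - the traced function's parent return address, from 8(%rbp)
	 */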

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
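/*
 * SYSCALL itself builds no interrupt frame: the hardware only puts the
 * return RIP in %rcx and RFLAGS in %r11, and the user %rsp is stashed in
 * %gs:pda_oldrsp at syscall entry. FIXUP_TOP_OF_STACK fills in the SS, RSP,
 * CS, EFLAGS and RCX slots of pt_regs from that state, so C code sees a
 * normal-looking frame.
 */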

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp  /* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $X86_EFLAGS_IF /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	movq_cfi rdi, RDI+16-ARGOFFSET
	movq_cfi rsi, RSI+16-ARGOFFSET
	movq_cfi rdx, RDX+16-ARGOFFSET
	movq_cfi rcx, RCX+16-ARGOFFSET
	movq_cfi rax, RAX+16-ARGOFFSET
	movq_cfi  r8,  R8+16-ARGOFFSET
	movq_cfi  r9,  R9+16-ARGOFFSET
	movq_cfi r10, R10+16-ARGOFFSET
	movq_cfi r11, R11+16-ARGOFFSET

	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 8		/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl %gs:pda_irqcount
	jne 2f
	popq_cfi %rax			/* move return address... */
	mov %gs:pda_irqstackptr,%rsp
	EMPTY_FRAME 0
	pushq_cfi %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
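/*
 * %ebx is returned as a "no swapgs" flag: 1 means the kernel gs base was
 * already active on entry, 0 means we came in on the user gs and did a
 * SWAPGS here, so the exit path has to swap back before returning.
 */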
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)

/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	DEFAULT_FRAME
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
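/*
 * For illustration only (user-space code, not part of this file), a
 * write(2) call following the convention above looks roughly like:
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# fd
 *	leaq	buf(%rip), %rsi		# buffer (illustrative symbol)
 *	movq	$buflen, %rdx		# count (illustrative symbol)
 *	syscall				# rcx <- return RIP, r11 <- rflags
 */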

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call do_notify_resume
	RESTORE_TOP_OF_STACK %r11
	RESTORE_REST
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit.  Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg	/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
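/*
 * Roughly, each generated stub below is just
 *	pushq $(~vector+0x80)
 *	jmp   common_interrupt		# via the shared jump at the end of a chunk
 * while the table built in .init.rodata (the interrupt[] array) records one
 * stub address per vector, used elsewhere when the IDT is set up.
 */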
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only the call-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \do_sym
	jmp ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
#endif

apicinterrupt 220 \
	uv_bau_message_intr1 uv_bau_message_interrupt
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt

#ifdef CONFIG_SMP
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
	invalidate_interrupt0 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
	invalidate_interrupt1 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
	invalidate_interrupt2 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
	invalidate_interrupt3 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
	invalidate_interrupt4 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
	invalidate_interrupt5 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
	invalidate_interrupt6 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
	invalidate_interrupt7 smp_invalidate_interrupt
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt mce_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

/*
 * Exception entry points.
 */
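/*
 * The flavors generated below:
 * - zeroentry: the CPU pushed no error code, so a fake ORIG_RAX of -1 is
 *   pushed to keep the frame layout uniform.
 * - errorentry: the CPU pushed an error code, which is passed on to the
 *   C handler as the second argument.
 * - the paranoid* variants go through save_paranoid, so they are safe even
 *   when the gs base may still be the user's (e.g. debug, NMI, machine check).
 */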
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
 	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	movq %gs:pda_data_offset, %rbp
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	call \do_sym
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

	/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error

       /* Reload gs selector with exception handling */
       /* edi:  new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:
        movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
        popf
	CFI_ADJUST_CFA_OFFSET -8
        ret
	CFI_ENDPROC
END(native_load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
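/*
 * Purely illustrative (hypothetical caller), the C-level usage is roughly:
 *
 *	static int worker(void *arg) { ...; return 0; }
 *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */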
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here, so internally to
	 * the x86_64 port you can rely on kernel_thread() not to reschedule
	 * the child before returning; this avoids the need for hacks, for
	 * example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
END(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_execve)

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check do_machine_check
#endif

	/*
 	 * "Paranoid" exit path from exception stack.
  	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */

	/* ebx:	no swapgs flag */
ENTRY(paranoid_exit)
	INTR_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz   paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK
paranoid_restore:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
	CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * returns in "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi  r8,  R8+8
	movq_cfi  r9,  R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
	jmp error_sti
END(error_entry)


/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
END(error_exit)


	/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1
	subq $15*8, %rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi
	movq $-1,%rsi
	call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
	/* paranoidexit; without TRACE_IRQS_OFF */
	/* ebx:	no swapgs flag */
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl %ebx,%ebx				/* swapgs needed? */
	jnz nmi_restore
	testl $3,CS(%rsp)
	jnz nmi_userspace
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_ALL 8
	jmp irq_return
nmi_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz nmi_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz nmi_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp nmi_userspace
nmi_schedule:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp nmi_userspace
	CFI_ENDPROC
#else
	jmp paranoid_exit
 	CFI_ENDPROC
#endif
END(nmi)

ENTRY(ignore_sysret)
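	/*
	 * Presumably installed as the SYSCALL entry (MSR_CSTAR) when 32-bit
	 * syscall emulation is not configured: just fail the call with -ENOSYS.
	 */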
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
	.popsection