/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64			(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT			0x80000000
#define __AUDIT_ARCH_LE				0x40000000

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
	 * must execute atomically in the face of possible interrupt-driven
	 * task preemption. We must enable interrupts only after we're done
	 * with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
entry_SYSCALL_64_fastpath:
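	/*
	 * Note: __SYSCALL_MASK is ~0 unless CONFIG_X86_X32_ABI is enabled,
	 * in which case it clears the x32 syscall marker bit from the
	 * syscall number before the table-limit check below.
	 */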
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	/*
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	movq	RSP(%rsp), %rsp
	/*
	 * 64-bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * Restoration of rflags re-enables interrupts.
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized.  This means that we should
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector.  (All
	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
	 * from happening by reloading SS in __switch_to.  (Actually
	 * detecting the failure in 64-bit userspace is tricky but can be
	 * done.)
	 */
	USERGS_SYSRET64

GLOBAL(int_ret_from_sys_call_irqs_off)
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	jmp int_ret_from_sys_call

	/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2			/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax, %rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx			/* fixup for C */
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET.  This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions.  For example, single-stepping this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)

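/*
 * Note: clone/fork/vfork need a complete pt_regs image: copy_thread()
 * reads the callee-saved registers out of the stack frame to set up
 * the child, so the stubs below fill in the part the fast path did
 * not save before tail-calling the real sys_* function.
 */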
	.macro FORK_LIKE func
ENTRY(stub_\func)
	SAVE_EXTRA_REGS 8
	jmp	sys_\func
END(stub_\func)
	.endm

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork

ENTRY(stub_execve)
	call	sys_execve
return_from_execve:
	testl	%eax, %eax
	jz	1f
	/* exec failed, can use fast SYSRET code path in this case */
	ret
1:
	/* must use IRET code path (pt_regs->cs may have changed) */
	addq	$8, %rsp
	ZERO_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_execve)
/*
 * Remaining execve stubs are only 7 bytes long.
 * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
 */
	.align	8
GLOBAL(stub_execveat)
	call	sys_execveat
	jmp	return_from_execve
END(stub_execveat)

#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
	.align	8
GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
	call	compat_sys_execve
	jmp	return_from_execve
END(stub32_execve)
END(stub_x32_execve)
	.align	8
GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat)
	call	compat_sys_execveat
	jmp	return_from_execve
END(stub32_execveat)
END(stub_x32_execveat)
#endif

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	/*
	 * SAVE_EXTRA_REGS result is not normally needed:
	 * sigreturn overwrites all pt_regs->GPREGS.
	 * But sigreturn can fail (!), and there is no easy way to detect that.
	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
	 * we SAVE_EXTRA_REGS here.
	 */
	SAVE_EXTRA_REGS 8
	call	sys_rt_sigreturn
return_from_stub:
	addq	$8, %rsp
	RESTORE_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	SAVE_EXTRA_REGS 8
	call	sys32_x32_rt_sigreturn
	jmp	return_from_stub
END(stub_x32_rt_sigreturn)
#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
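	/*
	 * TIF_FORK was set on the child by copy_thread(); clear it now
	 * that the new task is actually running.
	 */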

	LOCK ; btr $TIF_FORK, TI_flags(%r8)

	pushq	$0x0002
	popfq					/* reset kernel eflags */

	call	schedule_tail			/* rdi: 'prev' task parameter */

	RESTORE_EXTRA_REGS

	testb	$3, CS(%rsp)			/* from kernel_thread? */

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the 32-bit compat paths.
	 * Use IRET code path to return, since it can safely handle
	 * all of the above.
	 */
	jnz	int_ret_from_sys_call

	/*
	 * We came from kernel_thread
	 * nb: we depend on RESTORE_EXTRA_REGS above
	 */
	movq	%rbp, %rdi
	call	*%rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	movq	%rsp,%rdi	/* arg1 for \func (pointer to pt_regs) */

	testb	$3, CS(%rsp)
	jz	1f
	SWAPGS
1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rsi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rsi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel
	/* Interrupt came from user space */
GLOBAL(retint_user)
	GET_THREAD_INFO(%rcx)

	/* %rcx: thread info. Interrupts are off. */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK, %edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	TI_flags(%rcx), %edx
	andl	%edi, %edx
	jnz	retint_careful

retint_swapgs:					/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
restore_regs_and_iret:
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
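	/*
	 * The iret frame is rebuilt on the per-cpu espfix stack, whose low
	 * 16 bits of %rsp are well defined, so that the hardware's failure
	 * to clear the high bits of %esp when returning to a 16-bit SS
	 * cannot leak kernel stack address bits to user space.
	 */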
	pushq	%rax
	pushq	%rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif

	/* edi: workmask, edx: work */
retint_careful:
	bt	$TIF_NEED_RESCHED, %edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	SCHEDULE_USER
	popq	%rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK, %edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movq	$-1, ORIG_RAX(%rsp)
	xorl	%esi, %esi			/* oldset */
	movq	%rsp, %rdi			/* &pt_regs */
	call	do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp	retint_with_reschedule

END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

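/*
 * With CONFIG_TRACING, apicinterrupt also emits a trace_* entry point
 * that calls the matching smp_trace_* handler so the interrupt can be
 * traced; otherwise trace_apicinterrupt expands to nothing.
 */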
#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

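	/*
	 * shift_ist moves this vector's IST slot down by EXCEPTION_STKSZ
	 * around the handler call, so that a recursive exception of the
	 * same type lands on a fresh stack instead of clobbering the one
	 * in use.
	 */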
	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry


	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl	%edi, %gs
2:	mfence					/* workaround */
	SWAPGS
	popfq
	ret
END(native_load_gs_index)

	_ASM_EXTABLE(gs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
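	/*
	 * irq_count is -1 while this CPU is off the irq stack; the
	 * incl/cmove pair below switches to the per-cpu irq stack only
	 * when this is the outermost entry.
	 */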
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1 /* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	paranoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	paranoid_exit_restore
paranoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
paranoid_exit_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

.Lerror_entry_from_usermode_swapgs:
	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS

.Lerror_entry_from_usermode_after_swapgs:
.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$gs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: gs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * gs_change's error handler with kernel gsbase.
	 */
	jmp	.Lerror_entry_from_usermode_swapgs

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user gsbase.
	 * Switch to kernel gsbase:
	 */
	SWAPGS

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)


/*
 * On entry, EBX is a "return to kernel mode" flag:
 *   1: already in kernel mode, don't need SWAPGS
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
ENTRY(error_exit)
	movl	%ebx, %eax
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%eax, %eax
	jnz	retint_kernel
	jmp	retint_user
END(error_exit)

/* Runs on exception stack */
ENTRY(nmi)
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
	 *  If the variable is not set and the stack is not the NMI
	 *  stack then:
	 *    o Set the special variable on the stack
	 *    o Copy the interrupt frame into a "saved" location on the stack
	 *    o Copy the interrupt frame into a "copy" location on the stack
	 *    o Continue processing the NMI
	 *  If the variable is set or the previous stack is the NMI stack:
	 *    o Modify the "copy" location to jump to the repeat_nmi
	 *    o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 */

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	/*
	 * If %cs was not the kernel segment, then the NMI triggered in user
	 * space, which means it is definitely not nested.
	 */
	cmpl	$__KERNEL_CS, 16(%rsp)
	jne	first_nmi

	/*
	 * Check the special variable on the stack to see if NMIs are
	 * executing.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.
	 * We need the double check. We check the NMI stack to satisfy the
	 * race when the first NMI clears the variable before returning.
	 * We check the variable because the first NMI could be in a
	 * breakpoint routine using a breakpoint stack.
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi
	/* Ah, it is within the NMI stack, treat it as nested */

nested_nmi:
	/*
	 * Do nothing if we interrupted the fixup in repeat_nmi.
	 * It's about to repeat the NMI handler, so we are fine
	 * with ignoring this one.
	 */
	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out

1:
	/* Set up the interrupted NMI's stack to jump to repeat_nmi */
	leaq	-1*8(%rsp), %rdx
	movq	%rdx, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* No need to check faults here */
	INTERRUPT_RETURN

first_nmi:
	/*
	 * Because nested NMIs will use the pushed location that we
	 * stored in rdx, we must keep that space available.
	 * Here's what our stack frame will look like:
	 * +-------------------------+
	 * | original SS             |
	 * | original Return RSP     |
	 * | original RFLAGS         |
	 * | original CS             |
	 * | original RIP            |
	 * +-------------------------+
	 * | temp storage for rdx    |
	 * +-------------------------+
	 * | NMI executing variable  |
	 * +-------------------------+
	 * | copied SS               |
	 * | copied Return RSP       |
	 * | copied RFLAGS           |
	 * | copied CS               |
	 * | copied RIP              |
	 * +-------------------------+
	 * | Saved SS                |
	 * | Saved Return RSP        |
	 * | Saved RFLAGS            |
	 * | Saved CS                |
	 * | Saved RIP               |
	 * +-------------------------+
	 * | pt_regs                 |
	 * +-------------------------+
	 *
	 * The saved stack frame is used to fix up the copied stack frame
	 * that a nested NMI may change to make the interrupted NMI iret jump
	 * to the repeat_nmi. The original stack frame and the temp storage
	 * are also used by nested NMIs and can not be trusted on exit.
	 */
	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
	movq	(%rsp), %rdx

	/* Set the NMI executing variable on the stack. */
	pushq	$1

	/* Leave room for the "copied" frame */
	subq	$(5*8), %rsp

	/* Copy the stack frame to the Saved frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr

	/* Everything up to here is safe from nested NMIs */

	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 */
repeat_nmi:
	/*
	 * Update the stack variable to say we are still in NMI (the update
	 * is benign for the non-repeat case, where 1 was pushed just above
	 * to this very stack slot).
	 */
	movq	$1, 10*8(%rsp)

	/* Make another copy, this one may be modified by nested NMIs */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested
	 * NMI if the first NMI took an exception and reset our iret stack
	 * so that we repeat another NMI.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	ALLOC_PT_GPREGS_ON_STACK

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context,
	 * even with normal interrupts enabled. An NMI should not be
	 * setting NEED_RESCHED or anything that normal interrupts and
	 * exceptions might do.
	 */
	call	paranoid_entry

	/*
	 * Save off the CR2 register. If we take a page fault in the NMI then
	 * it could corrupt the CR2 value. If the NMI preempts a page fault
	 * handler before it was able to read the CR2 register, and then the
	 * NMI itself takes a page fault, the page fault that was preempted
	 * will read the information from the NMI page fault and not the
	 * origin fault. Save it off and restore it if it changes.
	 * Use the r12 callee-saved register.
	 */
	movq	%cr2, %r12

	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/* Did the NMI take a page fault? Restore cr2 if it did */
	movq	%cr2, %rcx
	cmpq	%rcx, %r12
	je	1f
	movq	%r12, %cr2
1:
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	/* Pop the extra iret frame at once */
	REMOVE_PT_GPREGS_FROM_STACK 6*8

	/* Clear the NMI executing stack variable */
	movq	$0, 5*8(%rsp)
	INTERRUPT_RETURN
END(nmi)

ENTRY(ignore_sysret)
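	/*
	 * When CONFIG_IA32_EMULATION is disabled, syscall_init() points
	 * MSR_CSTAR here, so a 32-bit SYSCALL simply returns -ENOSYS.
	 */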
	mov	$-ENOSYS, %eax
	sysret
END(ignore_sysret)