/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	btl	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When userspace can change pt_regs->foo, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
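
	/*
	 * Entry code that has already pushed the hardware iret frame above
	 * (for example, the Xen PV SYSCALL entry) enters at the label below;
	 * the native path simply falls through into it.
	 */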
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	TRACE_IRQS_OFF

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
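
	/*
	 * Worked example (4-level paging, __VIRTUAL_MASK_SHIFT == 47): the
	 * shl/sar pair by 64 - 48 = 16 sign-extends bit 47 into bits 63:48.
	 * A canonical RCX such as 0x00007fffffffe000 is unchanged, while a
	 * non-canonical one such as 0x0000800000000000 becomes
	 * 0xffff800000000000 and is caught by the comparison below.
	 */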

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */
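
	/*
	 * %rdi still points at the old stack's pt_regs (at pt_regs->di, where
	 * POP_REGS left %rsp), so RSP-RDI(%rdi) reads pt_regs->sp and (%rdi)
	 * reads the saved user RDI: exactly the two words that the
	 * popq %rdi / popq %rsp below consume.
	 */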

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)

	.align 8
ENTRY(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_spurious
	.align	8
	vector=vector+1
    .endr
END(spurious_entries_start)

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq %rax
	SAVE_FLAGS(CLBR_RAX)
	testl $X86_EFLAGS_IF, %eax
	jz .Lokay_\@
	ud2
.Lokay_\@:
	popq %rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(hardirq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/*
 * Interrupt entry helper function.
 *
 * Entry runs with interrupts off. Stack layout at entry:
 * +----------------------------------------------------+
 * | regs->ss						|
 * | regs->rsp						|
 * | regs->eflags					|
 * | regs->cs						|
 * | regs->ip						|
 * +----------------------------------------------------+
 * | regs->orig_ax = ~(interrupt number)		|
 * +----------------------------------------------------+
 * | return address					|
 * +----------------------------------------------------+
 */
ENTRY(interrupt_entry)
	UNWIND_HINT_FUNC
	ASM_CLAC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS

	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are
	 * not (yet) on the stack and space has not (yet) been
	 * allocated for them.
	 */
	pushq	%rdi

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	 /*
	  * We have RDI, return address, and orig_ax on the stack on
	  * top of the IRET frame. That means offset=24
	  */
	UNWIND_HINT_IRET_REGS base=%rdi offset=24

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
1:

	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)

/* Interrupt entry/exit. */

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_spurious/interrupt.
 */
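
/*
 * For example, vector 33 is pushed as (~33 + 0x80) = 0x5e, which fits in a
 * sign-extended byte immediate and keeps each stub within its 8-byte slot.
 * The addq $-0x80 below removes the bias again, leaving ~33 in orig_ax, and
 * the C handlers recover the vector number as ~regs->orig_ax.
 */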
common_spurious:
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	smp_spurious_interrupt		/* rdi points to pt_regs */
	jmp	ret_from_intr
END(common_spurious)
_ASM_NOKPROBE(common_spurious)

/* common_interrupt is a hotpath. Align it */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	do_IRQ	/* rdi points to pt_regs */
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
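	/*
	 * Bit 9 of the saved EFLAGS is IF: preempt only if the interrupted
	 * context had interrupts enabled and __preempt_count is zero.
	 */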
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	pushq	$~(\num)
.Lcommon_\sym:
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	\do_sym	/* rdi points to pt_regs */
	jmp	ret_from_intr
END(\sym)
_ASM_NOKPROBE(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)

.macro idtentry_part do_sym, has_error_code:req, paranoid:req, shift_ist=-1, ist_offset=0

	.if \paranoid
	call	paranoid_entry
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$\ist_offset, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$\ist_offset, CPU_TSS_IST(\shift_ist)
	.endif

	.if \paranoid
	/* this procedure expect "no swapgs" flag in ebx */
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

.endm

/**
 * idtentry - Generate an IDT entry stub
 * @sym:		Name of the generated entry point
 * @do_sym:		C function to be called
 * @has_error_code:	True if this IDT vector has an error code on the stack
 * @paranoid:		non-zero means that this vector may be invoked from
 *			kernel mode with user GSBASE and/or user CR3.
 *			2 is special -- see below.
 * @shift_ist:		Set to an IST index if entries from kernel mode should
 *			decrement the IST stack so that nested entries get a
 *			fresh stack.  (This is for #DB, which has a nasty habit
 *			of recursing.)
 * @create_gap:		create a 6-word stack gap when coming from kernel mode.
 *
 * idtentry generates an IDT stub that sets up a usable kernel context,
 * creates struct pt_regs, and calls @do_sym.  The stub has the following
 * special behaviors:
 *
 * On an entry from user mode, the stub switches from the trampoline or
 * IST stack to the normal thread stack.  On an exit to user mode, the
 * normal exit-to-usermode path is invoked.
 *
 * On an exit to kernel mode, if @paranoid == 0, we check for preemption,
 * whereas we omit the preemption check if @paranoid != 0.  This is purely
 * because the implementation is simpler this way.  The kernel only needs
 * to check for asynchronous kernel preemption when IRQ handlers return.
 *
 * If @paranoid == 0, then the stub will handle IRET faults by pretending
 * that the fault came from user mode.  It will handle gs_change faults by
 * pretending that the fault happened with kernel GSBASE.  Since this handling
 * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have
 * @paranoid == 0.  This special handling will do the wrong thing for
 * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0.
 *
 * @paranoid == 2 is special: the stub will never switch stacks.  This is for
 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
 */
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid != 1
	.error "using shift_ist requires paranoid=1"
	.endif

	.if \create_gap && \paranoid
	.error "using create_gap requires paranoid=0"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	.if \paranoid == 1
	testb	$3, CS-ORIG_RAX(%rsp)		/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \create_gap == 1
	/*
	 * If coming from kernel space, create a 6-word gap to allow the
	 * int3 handler to emulate a call instruction.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_no_gap_\@
	.rept	6
	pushq	5*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_part \do_sym, \has_error_code, \paranoid, \shift_ist, \ist_offset

	.if \paranoid == 1
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	idtentry_part \do_sym, \has_error_code, paranoid=0
	.endif

_ASM_NOKPROBE(\sym)
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
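
	/*
	 * The exception table entry above makes a fault on the %gs load at
	 * .Lgs_change (e.g. a bad selector) resume at bad_gs in the .fixup
	 * section below instead of oopsing.
	 */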
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN_PV
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

#ifdef CONFIG_XEN_PVHVM
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall
#endif

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler

apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
	hyperv_reenlightenment_vector hyperv_reenlightenment_intr

apicinterrupt3 HYPERV_STIMER0_VECTOR \
	hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */

#if IS_ENABLED(CONFIG_ACRN_GUEST)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	acrn_hv_callback_vector acrn_hv_vector_handler
#endif

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
idtentry int3			do_int3			has_error_code=0	create_gap=1
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN_PV
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
idtentry xenint3		do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		do_mce			has_error_code=0	paranoid=1
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
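	/*
	 * The kernel GSBASE points into the per-CPU area in the top half of
	 * the address space, while a user GSBASE is always a lower-half
	 * address, so a negative high word (%edx) means kernel GS is already
	 * active.
	 */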
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx

1:
	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
	jmp restore_regs_and_return_to_kernel
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
ENTRY(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	jmp .Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)

ENTRY(error_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	retint_kernel
	jmp	retint_user
END(error_exit)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
ENTRY(nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check the a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
	 *  If the variable is not set and the stack is not the NMI
	 *  stack then:
	 *    o Set the special variable on the stack
	 *    o Copy the interrupt frame into an "outermost" location on the
	 *      stack
	 *    o Copy the interrupt frame into an "iret" location on the stack
	 *    o Continue processing the NMI
	 *  If the variable is set or the previous stack is the NMI stack:
	 *    o Modify the "iret" location to jump to the repeat_nmi
	 *    o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction.  Similarly, IRET to user mode
	 * can fault.  We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode.  We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */

	swapgs
	cld
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq   $-1		/* pt_regs->orig_ax */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/*
	 * Return back to user mode.  We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                             |
	 * | original Return RSP                                     |
	 * | original RFLAGS                                         |
	 * | original CS                                             |
	 * | original RIP                                            |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                    |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame        |
	 * | iret Return RSP  } on each loop iteration; overwritten  |
	 * | iret RFLAGS      } by a nested NMI to force another     |
	 * | iret CS          } iteration if needed.                 |
	 * | iret RIP         }                                      |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;       |
	 * | outermost Return RSP  } will not be changed before      |
	 * | outermost RFLAGS      } NMI processing is done.         |
	 * | outermost CS          } Copied to "iret" frame on each  |
	 * | outermost RIP         } iteration.                      |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                 |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call do_nmi anyway, so we can just resume
	 * the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */
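
	/*
	 * (3*8 + 1)(%rsp) is the second byte of the interrupted context's
	 * RFLAGS in the hardware NMI frame; DF is bit 10 of RFLAGS, i.e.
	 * bit 2 of that byte.  Only the outer NMI's exit code (between std
	 * and the clearing of "NMI executing") runs with DF set, so a clear
	 * DF here means RSP may have been user controlled.
	 */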

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call do_nmi.  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled.  An NMI should not be setting
	 * NEED_RESCHED or anything that normal interrupts and exceptions
	 * might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	POP_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
	 * frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
END(nmi)

#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code.  There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
ENTRY(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysret
END(ignore_sysret)
#endif

ENTRY(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

	call	do_exit
END(rewind_stack_do_exit)