/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
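
/*
 * For orientation only: the offsets above correspond to the 32-bit
 * struct pt_regs.  The sketch below is an aid for readers, not a
 * substitute for the authoritative definition in asm/ptrace.h:
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax, ip, cs, flags, sp, ss;
 *	};
 */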

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
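
/*
 * For reference, a sketch of the per-CPU object that %gs:20 hits when
 * the kernel %gs base points at it (see asm/processor.h and
 * asm/stackprotector.h for the real definitions; illustration only):
 *
 *	struct stack_canary {
 *		char __pad[20];
 *		unsigned long canary;	// read through %gs:20
 *	};
 */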
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately, push/pop can't be no-ops */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-ops */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov %esp, %ebp
	andl $0x7fffffff, %ebp
#endif
.endm
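
/*
 * Decoding sketch (illustrative, not the unwinder's literal code): with
 * the default 3G/1G split a kernel stack address has its MSB set, so
 * the unwinder can recover the pt_regs pointer by setting it back:
 *
 *	movl	%ebp, %eax
 *	orl	$0x80000000, %eax	# %eax = pt_regs pointer
 */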

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)
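
/*
 * For reference, the pushes in __switch_to_asm must line up with the
 * 32-bit layout of struct inactive_task_frame (sketch only;
 * asm/switch_to.h is authoritative):
 *
 *	struct inactive_task_frame {
 *		unsigned long si, di, bx, bp;
 *		unsigned long ret_addr;
 *	};
 *
 * The new task's %esp points at 'si', with the return address sitting
 * just above the callee-saved registers.
 */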

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl    %esp, %eax
	call    syscall_return_slowpath
	jmp     restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
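
/*
 * Register-convention example (illustrative values): a 32-bit
 * write(fd, buf, count) arrives here with eax = 4 (__NR_write),
 * ebx = fd, ecx = buf, edx = count, and the user stack pointer
 * stashed in ebp by the vDSO, so a 6th argument would be at 0(%ebp).
 */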
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call.  (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
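
/*
 * Illustrative userspace usage (not part of this file): the classic
 * _exit(0) sequence that lands on this entry point:
 *
 *	movl	$1, %eax		# __NR_exit
 *	xorl	%ebx, %ebx		# status = 0
 *	int	$0x80
 */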
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je .Lldt_ss				# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
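
/*
 * Worked example with made-up values: if the kernel %esp is 0xc1a25678
 * and the userspace %esp is 0x1998beef, the code below builds a new
 * %esp of 0x19985678 (user high word, kernel low word) and programs the
 * ESPFIX segment base to 0xc1a25678 - 0x19985678 = 0xa80a0000, so that
 * base + %esp still resolves to the kernel stack while the high word a
 * 16-bit task can observe (0x1998) is its own.
 */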
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
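
/*
 * Worked example of the vector encoding (illustrative number): for
 * vector 0x31 the stub pushes ~0x31 + 0x80 = 0x4e, which fits in a
 * signed byte, keeping each stub within its 8-byte slot.
 * common_interrupt below then adds -0x80, recovering ~0x31 (a value in
 * the [-256, -1] range), and the C handler undoes the complement to
 * get the vector number back.
 */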

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif /* CONFIG_HYPERV */

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	ENCODE_FRAME_POINTER
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
	 * happens, then we will be running on a very small stack.  We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Ldebug_from_sysenter_stack

	TRACE_IRQS_OFF
	call	do_debug
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
	movl	%ebx, %esp
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
993 994
 */
ENTRY(nmi)
995
	ASM_CLAC
996
#ifdef CONFIG_X86_ESPFIX32
997 998 999 1000
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
1001
	je	.Lnmi_espfix_stack
1002
#endif
1003 1004

	pushl	%eax				# pt_regs->orig_ax
1005
	SAVE_ALL
1006
	ENCODE_FRAME_POINTER
1007 1008
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
1009 1010

	/* Are we currently on the SYSENTER stack? */
1011
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
1012 1013 1014
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
1015 1016 1017
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
1018
	call	do_nmi
1019
	jmp	.Lrestore_all_notrace
1020

1021 1022 1023 1024 1025
.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
1026
	movl	%esp, %ebx
1027 1028
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
1029
	movl	%ebx, %esp
1030
	jmp	.Lrestore_all_notrace
1031

1032
#ifdef CONFIG_X86_ESPFIX32
1033
.Lnmi_espfix_stack:
1034
	/*
1035 1036
	 * create the pointer to lss back
	 */
1037 1038 1039
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
1040 1041
	/* copy the iret frame of 12 bytes */
	.rept 3
1042
	pushl	16(%esp)
1043
	.endr
1044
	pushl	%eax
1045
	SAVE_ALL
1046
	ENCODE_FRAME_POINTER
1047 1048 1049
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
1050
	RESTORE_REGS
1051
	lss	12+4(%esp), %esp		# back to espfix stack
1052
	jmp	.Lirq_return
1053
#endif
1054 1055 1056
END(nmi)

ENTRY(int3)
1057
	ASM_CLAC
1058
	pushl	$-1				# mark this as an int
1059
	SAVE_ALL
1060
	ENCODE_FRAME_POINTER
1061
	TRACE_IRQS_OFF
1062 1063 1064 1065
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
1066 1067 1068
END(int3)

ENTRY(general_protection)
1069
	pushl	$do_general_protection
1070
	jmp	common_exception
1071 1072
END(general_protection)

G
ENTRY(async_page_fault)
1075
	ASM_CLAC
1076
	pushl	$do_async_page_fault
1077
	jmp	common_exception
1078
END(async_page_fault)
G
1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp 1b
END(rewind_stack_do_exit)