/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl

	/* When we fork, we trace the syscall return in the child, too. */
	movl    %esp, %eax
	call    syscall_return_slowpath
	jmp     restore_all
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	movl    %esp, %eax
	call    syscall_return_slowpath
	jmp     restore_all
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
#endif

ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on.  Unlike the 64-bit
	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
	 * are already on (unless user code is messing around with iopl).
	 */

	movl	%esp, %eax
	call	do_syscall_32_irqs_on
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
	hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl	from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif