/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit		syscall_trace_entry
# define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which gcc requires to be at %gs:20.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
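	/*
	 * If one of the segment-register pops above faults, the fixup
	 * entries below zero the saved value on the stack and retry the
	 * pop, so a bogus user segment cannot wedge the return path.
	 */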
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	jmp	syscall_exit
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
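	/*
	 * A kernel thread's function pointer and its argument were stashed
	 * in pt_regs->bx and pt_regs->bp by copy_thread().  Call it; if it
	 * returns, the thread has exec'd a user program, so drop into user
	 * mode through the syscall exit path with a return value of 0.
	 */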
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)
	jmp	syscall_exit
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done on
						# int/exception return?
	jne	work_pending
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
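	/*
	 * NEED_RESCHED is folded (inverted) into the per-CPU preempt count,
	 * so a non-zero count means either preemption is disabled or there
	 * is nothing to reschedule.
	 */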
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page.  See vsyscall-sysenter.S, which defines
 * the symbol.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
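	/*
	 * SYSENTER does not switch stacks for us: on entry, %esp still
	 * points into the per-CPU TSS (set up via MSR_IA32_SYSENTER_ESP),
	 * so fetch the task's real kernel stack pointer (tss.sp0) from it.
	 */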
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up to call TRACE_IRQS_OFF -
	 * and we immediately enable interrupts at that point anyway.
	 */
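	/*
	 * SYSENTER pushed nothing, so build the user-mode iret frame by
	 * hand: SS, ESP (the vDSO stub saved the user %esp in %ebp),
	 * EFLAGS with IF forced on, CS, and EIP (sysenter_return, pushed
	 * below).
	 */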
	pushl	$__USER_DS
	pushl	%ebp
	pushfl
	orl	$X86_EFLAGS_IF, (%esp)
	pushl	$__USER_CS
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page.  4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

	pushl	%eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl	$__PAGE_OFFSET-3, %ebp
	jae	syscall_fault
	ASM_STAC
1:	movl	(%ebp), %ebp
	ASM_CLAC
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	sysenter_audit
sysenter_do_call:
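	/*
	 * %eax holds the syscall number; range-check it, then dispatch
	 * through the 4-byte-per-entry sys_call_table.
	 */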
	cmpl	$(NR_syscalls), %eax
	jae	sysenter_badsys
	call	*sys_call_table(, %eax, 4)
sysenter_after_call:
	movl	%eax, PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
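	/* SYSEXIT returns to user mode with %eip := %edx and %esp := %ecx */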
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
	xorl	%ebp, %ebp
	TRACE_IRQS_ON
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz	syscall_trace_entry
	/* movl	PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
	movl	PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
	/* movl	PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
	pushl	PT_ESI(%esp)			/* a3: 5th arg */
	pushl	PT_EDX+4(%esp)			/* a2: 4th arg */
	call	__audit_syscall_entry
	popl	%ecx				/* get that remapped edx off the stack */
	popl	%ecx				/* get that remapped esi off the stack */
	movl	PT_EAX(%esp), %eax		/* reload syscall number */
	jmp	sysenter_do_call

sysexit_audit:
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl	%eax, %edx			/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO, %eax		/* is it an error ? */
	setbe %al				/* 1 if so, 0 if not */
	movzbl %al, %eax			/* zero-extend that */
	call	__audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	movl	PT_EAX(%esp), %eax		/* reload syscall return value */
	jmp	sysenter_exit
#endif

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax				# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
						# system call tracing in operation / emulation
	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	jae	syscall_badsys
syscall_call:
	call	*sys_call_table(, %eax, 4)
syscall_after_call:
	movl	%eax, PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz	syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc	)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	jne	restore_nocheck
#endif

/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb	$_TIF_NEED_RESCHED, %cl
	jz	work_notifysig
work_resched:
	call	schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done other
						# than syscall tracing?
	jz	restore_all
	testb	$_TIF_NEED_RESCHED, %cl
	jnz	work_resched

work_notifysig:					# deal with pending signals and
						# notify-resume requests
#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl	%esp, %eax
	jnz	work_notifysig_v86		# returning to kernel-space or
						# vm86-space
1:
#else
	movl	%esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb	PT_CS(%esp), %bl
	andb	$SEGMENT_RPL_MASK, %bl
	cmpb	$USER_RPL, %bl
	jb	resume_kernel
	xorl	%edx, %edx
	call	do_notify_resume
	jmp	resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl	%ecx				# save ti_flags for do_notify_resume
	call	save_v86_state			# %eax contains pt_regs pointer
	popl	%ecx
	movl	%eax, %esp
	jmp	1b
#endif
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl	$-ENOSYS, PT_EAX(%esp)
	movl	%esp, %eax
	call	syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl	$(NR_syscalls), %eax
	jnae	syscall_call
	jmp	syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl	$_TIF_WORK_SYSCALL_EXIT, %ecx
	jz	work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)		# could let syscall_trace_leave() call
						# schedule() instead
	movl	%esp, %eax
	call	syscall_trace_leave
	jmp	resume_userspace
END(syscall_exit_work)

syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)
	jmp	resume_userspace
END(syscall_fault)

syscall_badsys:
	movl	$-ENOSYS, %eax
	jmp	syscall_after_call
END(syscall_badsys)

sysenter_badsys:
	movl	$-ENOSYS, %eax
	jmp	sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
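/*
 * Each stub pushes ~vector, offset by 0x80 so the immediate fits in a
 * signed byte; common_interrupt undoes the offset, leaving a value in
 * [-256, -1] that can never be confused with a valid syscall number
 * in the orig_eax slot.
 */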
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
690
	ASM_CLAC
691 692 693
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
694
END(coprocessor_error)
L
Linus Torvalds 已提交
695 696

ENTRY(simd_coprocessor_error)
697
	ASM_CLAC
698
	pushl	$0
699 700
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
701 702
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
703
		    X86_FEATURE_XMM
704
#else
705
	pushl	$do_simd_coprocessor_error
706
#endif
707
	jmp	error_code
708
END(simd_coprocessor_error)
L
Linus Torvalds 已提交
709 710

ENTRY(device_not_available)
711
	ASM_CLAC
712 713 714
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
715
END(device_not_available)
L
Linus Torvalds 已提交
716

717 718
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
I
Ingo Molnar 已提交
719
	iret
720
	_ASM_EXTABLE(native_iret, iret_exc)
721
END(native_iret)
722

723
ENTRY(native_irq_enable_sysexit)
724 725
	sti
	sysexit
726
END(native_irq_enable_sysexit)
727 728
#endif

L
ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)
804
#ifdef CONFIG_XEN
805 806 807 808
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
809
ENTRY(xen_sysenter_target)
810 811
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
812

813
ENTRY(xen_hypervisor_callback)
814
	pushl	$-1				/* orig_ax = -1 => not a system call */
815 816
	SAVE_ALL
	TRACE_IRQS_OFF
817

818 819 820 821 822 823 824 825 826 827 828 829
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f
830

831
	jmp	xen_iret_crit_fixup
832 833

ENTRY(xen_do_upcall)
834 835
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
836
#ifndef CONFIG_PREEMPT
837
	call	xen_maybe_preempt_hcall
838
#endif
839
	jmp	ret_from_intr
840 841
ENDPROC(xen_hypervisor_callback)

842 843 844 845 846 847 848 849 850 851 852 853
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
	hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl	from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
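	/*
	 * The exception stub pushed the handler address into the slot that
	 * SAVE_ALL would use for %gs.  Stash the live %gs in %ecx, pull the
	 * handler address out of PT_GS, and (when %gs is not lazily handled)
	 * store the real %gs value there so the saved pt_regs is consistent.
	 */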
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * Create the %ss:%esp far pointer that the lss below uses to
	 * switch back to the espfix stack.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif