/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
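/*
 * Background note: with CONFIG_CC_STACKPROTECTOR, gcc emits loads such
 * as "movl %gs:20, %reg" in instrumented prologues, so whenever kernel
 * C code can run, %gs must reference a segment whose offset 20 holds
 * the current canary value.
 */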
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov %esp, %ebp
	andl $0x7fffffff, %ebp
#endif
.endm
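
/*
 * Worked example (illustrative value): with a kernel %esp of, say,
 * 0xf6045f30, ENCODE_FRAME_POINTER leaves %ebp = 0x76045f30.  Kernel
 * stacks live above PAGE_OFFSET (0xc0000000 by default), so the encoded
 * value can never be a valid kernel stack address, which is how the
 * unwinder tells it apart from a real frame pointer.
 */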

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl    %esp, %eax
	call    syscall_return_slowpath
	jmp     restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	call	*%ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
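/*
 * For reference, a simplified sketch (not the literal vDSO source) of
 * the __kernel_vsyscall fast path that lands here:
 *
 *	push	%ecx		# arg2; SYSEXIT clobbers %ecx
 *	push	%edx		# arg3; SYSEXIT clobbers %edx
 *	push	%ebp		# arg6 lives in %ebp in the ABI
 *	movl	%esp, %ebp	# stash user %esp; 0(%ebp) is now arg6
 *	sysenter
 *	# SYSEXIT returns to a fixed landing pad in the vDSO
 *	pop	%ebp
 *	pop	%edx
 *	pop	%ecx
 *	ret
 */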
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * any of them were set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit
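	/*
	 * The STI above executes with a one-instruction interrupt shadow,
	 * so no interrupt can be delivered between STI and SYSEXIT and we
	 * never run with user flags half-restored.
	 */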

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)
/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call.  (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
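/*
 * A minimal userspace sketch (illustrative only) of such an inline
 * system call, here write(1, msg, len):
 *
 *	movl	$4, %eax		# __NR_write
 *	movl	$1, %ebx		# fd
 *	movl	$msg, %ecx		# buf
 *	movl	$len, %edx		# count
 *	int	$0x80			# result arrives in %eax
 */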
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je .Lldt_ss				# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that accounts for the difference.
 */
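/*
 * Worked example (illustrative numbers): with a kernel %esp of
 * 0xf6045fd0 and a userspace %esp of 0x0005ffa0, we build
 * %eax = 0x00055fd0 (user high word, kernel low word) and program the
 * ESPFIX segment base to 0xf6045fd0 - 0x00055fd0 = 0xf5ff0000 (the low
 * word is always 0).  base + %eax then addresses the real kernel stack,
 * while a 16-bit SS:SP only ever exposes the low word.
 */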
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
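
/*
 * Example of the vector encoding above (illustrative): for vector 0x20
 * (FIRST_EXTERNAL_VECTOR), the stub pushes ~0x20 + 0x80 = 0x5f, which is
 * in signed byte range and keeps each stub within its 8-byte slot.
 * common_interrupt's addl $-0x80 below yields 0x5f - 0x80 = -0x21 = ~0x20,
 * so the C handler can recover the vector number as ~orig_eax.
 */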

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
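
/*
 * As an example (hypothetical expansion), BUILD_INTERRUPT(reschedule_interrupt,
 * RESCHEDULE_VECTOR) would emit a reschedule_interrupt stub that pushes
 * $~(RESCHEDULE_VECTOR), saves registers and then calls
 * smp_reschedule_interrupt().
 */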

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)
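
/*
 * For the next few stubs (#TS, #NP, #SS, #AC) the CPU pushes an error
 * code itself, so no dummy $0 is pushed before the handler address.
 */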

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	ENCODE_FRAME_POINTER
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
	 * happens, then we will be running on a very small stack.  We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Ldebug_from_sysenter_stack

	TRACE_IRQS_OFF
	call	do_debug
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
	movl	%ebx, %esp
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp
	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create the %ss:%esp pair that lss will use to switch us back
	 * to the original stack afterwards.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp 1b
END(rewind_stack_do_exit)