Commit 4d732138 authored by Ingo Molnar

x86/asm/entry/64: Clean up entry_64.S

Make the 64-bit syscall entry code a bit more readable:

 - use consistent assembly coding style similar to the other entry_*.S files

 - remove old comments that are not true anymore

 - eliminate whitespace noise

 - use consistent vertical spacing

 - fix various comments

 - reorganize entry point generation tables to be more readable

No code changed:

  # arch/x86/entry/entry_64.o:

   text    data     bss     dec     hex filename
  12282       0       0   12282    2ffa entry_64.o.before
  12282       0       0   12282    2ffa entry_64.o.after

md5:
   cbab1f2d727a2a8a87618eeb79f391b7  entry_64.o.before.asm
   cbab1f2d727a2a8a87618eeb79f391b7  entry_64.o.after.asm
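
The md5 sums above are taken over disassembly listings of the object file built before and after the patch. A minimal sketch of how such a comparison can be reproduced is shown below; the objdump invocation and exact file names are illustrative assumptions, not part of the commit's own verification:

# Illustrative sketch (assumption, not from the commit): disassemble the
# object file built before and after the patch and compare md5 sums of
# the listings, mirroring the check quoted above.
import hashlib
import subprocess

def asm_md5(objfile):
    # objdump -d emits the disassembly listing that gets hashed here.
    listing = subprocess.run(["objdump", "-d", objfile],
                             capture_output=True, check=True).stdout
    return hashlib.md5(listing).hexdigest()

before = asm_md5("entry_64.o.before")
after = asm_md5("entry_64.o.after")
print(before)
print(after)
print("identical" if before == after else "object code changed")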

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 9dda1658
@@ -4,26 +4,20 @@
* Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
* Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
*/ *
/*
* entry.S contains the system-call and fault low-level handling routines. * entry.S contains the system-call and fault low-level handling routines.
* *
* Some of this is documented in Documentation/x86/entry_64.txt * Some of this is documented in Documentation/x86/entry_64.txt
* *
* NOTE: This code handles signal-recognition, which happens every time
* after an interrupt and after each system call.
*
* A note on terminology: * A note on terminology:
* - iret frame: Architecture defined interrupt frame from SS to RIP * - iret frame: Architecture defined interrupt frame from SS to RIP
* at the top of the kernel process stack. * at the top of the kernel process stack.
* *
* Some macro usage: * Some macro usage:
* - ENTRY/END Define functions in the symbol table. * - ENTRY/END: Define functions in the symbol table.
* - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. * - TRACE_IRQ_*: Trace hardirq state for lock debugging.
* - idtentry - Define exception entry points. * - idtentry: Define exception entry points.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/cache.h> #include <asm/cache.h>
@@ -50,9 +44,8 @@
#define __AUDIT_ARCH_64BIT 0x80000000 #define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE 0x40000000 #define __AUDIT_ARCH_LE 0x40000000
.code64 .code64
.section .entry.text, "ax" .section .entry.text, "ax"
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64) ENTRY(native_usergs_sysret64)
@@ -61,10 +54,9 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64) ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */ #endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_IRETQ .macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
bt $9,EFLAGS(%rsp) /* interrupts off? */ bt $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f jnc 1f
TRACE_IRQS_ON TRACE_IRQS_ON
1: 1:
@@ -97,7 +89,7 @@ ENDPROC(native_usergs_sysret64)
.endm .endm
.macro TRACE_IRQS_IRETQ_DEBUG .macro TRACE_IRQS_IRETQ_DEBUG
bt $9,EFLAGS(%rsp) /* interrupts off? */ bt $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f jnc 1f
TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON_DEBUG
1: 1:
@@ -110,9 +102,9 @@ ENDPROC(native_usergs_sysret64)
#endif #endif
/* /*
* 64bit SYSCALL instruction entry. Up to 6 arguments in registers. * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
* *
* 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
* then loads new ss, cs, and rip from previously programmed MSRs. * then loads new ss, cs, and rip from previously programmed MSRs.
* rflags gets masked by a value from another MSR (so CLD and CLAC * rflags gets masked by a value from another MSR (so CLD and CLAC
* are not needed). SYSCALL does not save anything on the stack * are not needed). SYSCALL does not save anything on the stack
@@ -128,7 +120,7 @@ ENDPROC(native_usergs_sysret64)
* r10 arg3 (needs to be moved to rcx to conform to C ABI) * r10 arg3 (needs to be moved to rcx to conform to C ABI)
* r8 arg4 * r8 arg4
* r9 arg5 * r9 arg5
* (note: r12-r15,rbp,rbx are callee-preserved in C ABI) * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
* *
* Only called from user space. * Only called from user space.
* *
@@ -151,8 +143,8 @@ ENTRY(entry_SYSCALL_64)
*/ */
GLOBAL(entry_SYSCALL_64_after_swapgs) GLOBAL(entry_SYSCALL_64_after_swapgs)
movq %rsp,PER_CPU_VAR(rsp_scratch) movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */ pushq $__USER_DS /* pt_regs->ss */
@@ -178,21 +170,21 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r9 /* pt_regs->r9 */ pushq %r9 /* pt_regs->r9 */
pushq %r10 /* pt_regs->r10 */ pushq %r10 /* pt_regs->r10 */
pushq %r11 /* pt_regs->r11 */ pushq %r11 /* pt_regs->r11 */
sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys jnz tracesys
entry_SYSCALL_64_fastpath: entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0 #if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax cmpq $__NR_syscall_max, %rax
#else #else
andl $__SYSCALL_MASK,%eax andl $__SYSCALL_MASK, %eax
cmpl $__NR_syscall_max,%eax cmpl $__NR_syscall_max, %eax
#endif #endif
ja 1f /* return -ENOSYS (already in pt_regs->ax) */ ja 1f /* return -ENOSYS (already in pt_regs->ax) */
movq %r10,%rcx movq %r10, %rcx
call *sys_call_table(,%rax,8) call *sys_call_table(, %rax, 8)
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
1: 1:
/* /*
* Syscall return path ending with SYSRET (fast path). * Syscall return path ending with SYSRET (fast path).
@@ -217,11 +209,11 @@ entry_SYSCALL_64_fastpath:
jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RIP(%rsp),%rcx movq RIP(%rsp), %rcx
movq EFLAGS(%rsp),%r11 movq EFLAGS(%rsp), %r11
movq RSP(%rsp),%rsp movq RSP(%rsp), %rsp
/* /*
* 64bit SYSRET restores rip from rcx, * 64-bit SYSRET restores rip from rcx,
* rflags from r11 (but RF and VM bits are forced to 0), * rflags from r11 (but RF and VM bits are forced to 0),
* cs and ss are loaded from MSRs. * cs and ss are loaded from MSRs.
* Restoration of rflags re-enables interrupts. * Restoration of rflags re-enables interrupts.
@@ -252,7 +244,7 @@ tracesys_phase2:
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
movq %rsp, %rdi movq %rsp, %rdi
movl $AUDIT_ARCH_X86_64, %esi movl $AUDIT_ARCH_X86_64, %esi
movq %rax,%rdx movq %rax, %rdx
call syscall_trace_enter_phase2 call syscall_trace_enter_phase2
/* /*
@@ -263,15 +255,15 @@ tracesys_phase2:
RESTORE_C_REGS_EXCEPT_RAX RESTORE_C_REGS_EXCEPT_RAX
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0 #if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax cmpq $__NR_syscall_max, %rax
#else #else
andl $__SYSCALL_MASK,%eax andl $__SYSCALL_MASK, %eax
cmpl $__NR_syscall_max,%eax cmpl $__NR_syscall_max, %eax
#endif #endif
ja 1f /* return -ENOSYS (already in pt_regs->ax) */ ja 1f /* return -ENOSYS (already in pt_regs->ax) */
movq %r10,%rcx /* fixup for C */ movq %r10, %rcx /* fixup for C */
call *sys_call_table(,%rax,8) call *sys_call_table(, %rax, 8)
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
1: 1:
/* Use IRET because user could have changed pt_regs->foo */ /* Use IRET because user could have changed pt_regs->foo */
@@ -283,22 +275,24 @@ GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */ int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl $_TIF_ALLWORK_MASK,%edi movl $_TIF_ALLWORK_MASK, %edi
/* edi: mask to check */ /* edi: mask to check */
GLOBAL(int_with_check) GLOBAL(int_with_check)
LOCKDEP_SYS_EXIT_IRQ LOCKDEP_SYS_EXIT_IRQ
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
movl TI_flags(%rcx),%edx movl TI_flags(%rcx), %edx
andl %edi,%edx andl %edi, %edx
jnz int_careful jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx) andl $~TS_COMPAT, TI_status(%rcx)
jmp syscall_return jmp syscall_return
/* Either reschedule or signal or syscall exit tracking needed. */ /*
/* First do a reschedule test. */ * Either reschedule or signal or syscall exit tracking needed.
/* edx: work, edi: workmask */ * First do a reschedule test.
* edx: work, edi: workmask
*/
int_careful: int_careful:
bt $TIF_NEED_RESCHED,%edx bt $TIF_NEED_RESCHED, %edx
jnc int_very_careful jnc int_very_careful
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
@@ -315,22 +309,22 @@ int_very_careful:
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
/* Check for syscall exit trace */ /* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx testl $_TIF_WORK_SYSCALL_EXIT, %edx
jz int_signal jz int_signal
pushq %rdi pushq %rdi
leaq 8(%rsp),%rdi # &ptregs -> arg1 leaq 8(%rsp), %rdi /* &ptregs -> arg1 */
call syscall_trace_leave call syscall_trace_leave
popq %rdi popq %rdi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi
jmp int_restore_rest jmp int_restore_rest
int_signal: int_signal:
testl $_TIF_DO_NOTIFY_MASK,%edx testl $_TIF_DO_NOTIFY_MASK, %edx
jz 1f jz 1f
movq %rsp,%rdi # &ptregs -> arg1 movq %rsp, %rdi /* &ptregs -> arg1 */
xorl %esi,%esi # oldset -> arg2 xorl %esi, %esi /* oldset -> arg2 */
call do_notify_resume call do_notify_resume
1: movl $_TIF_WORK_MASK,%edi 1: movl $_TIF_WORK_MASK, %edi
int_restore_rest: int_restore_rest:
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
@@ -346,9 +340,9 @@ syscall_return:
* Try to use SYSRET instead of IRET if we're returning to * Try to use SYSRET instead of IRET if we're returning to
* a completely clean 64-bit userspace context. * a completely clean 64-bit userspace context.
*/ */
movq RCX(%rsp),%rcx movq RCX(%rsp), %rcx
movq RIP(%rsp),%r11 movq RIP(%rsp), %r11
cmpq %rcx,%r11 /* RCX == RIP */ cmpq %rcx, %r11 /* RCX == RIP */
jne opportunistic_sysret_failed jne opportunistic_sysret_failed
/* /*
@@ -362,18 +356,20 @@ syscall_return:
.ifne __VIRTUAL_MASK_SHIFT - 47 .ifne __VIRTUAL_MASK_SHIFT - 47
.error "virtual address width changed -- SYSRET checks need update" .error "virtual address width changed -- SYSRET checks need update"
.endif .endif
/* Change top 16 bits to be the sign-extension of 47th bit */ /* Change top 16 bits to be the sign-extension of 47th bit */
shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
/* If this changed %rcx, it was not canonical */ /* If this changed %rcx, it was not canonical */
cmpq %rcx, %r11 cmpq %rcx, %r11
jne opportunistic_sysret_failed jne opportunistic_sysret_failed
cmpq $__USER_CS,CS(%rsp) /* CS must match SYSRET */ cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
jne opportunistic_sysret_failed jne opportunistic_sysret_failed
movq R11(%rsp),%r11 movq R11(%rsp), %r11
cmpq %r11,EFLAGS(%rsp) /* R11 == RFLAGS */ cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */
jne opportunistic_sysret_failed jne opportunistic_sysret_failed
/* /*
@@ -383,7 +379,7 @@ syscall_return:
* with register state that satisfies the opportunistic SYSRET * with register state that satisfies the opportunistic SYSRET
* conditions. For example, single-stepping this user code: * conditions. For example, single-stepping this user code:
* *
* movq $stuck_here,%rcx * movq $stuck_here, %rcx
* pushfq * pushfq
* popq %r11 * popq %r11
* stuck_here: * stuck_here:
@@ -395,7 +391,7 @@ syscall_return:
/* nothing to check for RSP */ /* nothing to check for RSP */
cmpq $__USER_DS,SS(%rsp) /* SS must match SYSRET */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
jne opportunistic_sysret_failed jne opportunistic_sysret_failed
/* /*
@@ -405,7 +401,7 @@ syscall_return:
syscall_return_via_sysret: syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */ /* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp),%rsp movq RSP(%rsp), %rsp
USERGS_SYSRET64 USERGS_SYSRET64
opportunistic_sysret_failed: opportunistic_sysret_failed:
@@ -436,7 +432,7 @@ return_from_execve:
/* must use IRET code path (pt_regs->cs may have changed) */ /* must use IRET code path (pt_regs->cs may have changed) */
addq $8, %rsp addq $8, %rsp
ZERO_EXTRA_REGS ZERO_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
END(stub_execve) END(stub_execve)
/* /*
@@ -483,7 +479,7 @@ ENTRY(stub_rt_sigreturn)
return_from_stub: return_from_stub:
addq $8, %rsp addq $8, %rsp
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
END(stub_rt_sigreturn) END(stub_rt_sigreturn)
@@ -502,16 +498,16 @@ END(stub_x32_rt_sigreturn)
*/ */
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
LOCK ; btr $TIF_FORK,TI_flags(%r8) LOCK ; btr $TIF_FORK, TI_flags(%r8)
pushq $0x0002 pushq $0x0002
popfq # reset kernel eflags popfq /* reset kernel eflags */
call schedule_tail # rdi: 'prev' task parameter call schedule_tail /* rdi: 'prev' task parameter */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
testb $3, CS(%rsp) # from kernel_thread? testb $3, CS(%rsp) /* from kernel_thread? */
/* /*
* By the time we get here, we have no idea whether our pt_regs, * By the time we get here, we have no idea whether our pt_regs,
@@ -522,8 +518,10 @@ ENTRY(ret_from_fork)
*/ */
jnz int_ret_from_sys_call jnz int_ret_from_sys_call
/* We came from kernel_thread */ /*
/* nb: we depend on RESTORE_EXTRA_REGS above */ * We came from kernel_thread
* nb: we depend on RESTORE_EXTRA_REGS above
*/
movq %rbp, %rdi movq %rbp, %rdi
call *%rbx call *%rbx
movl $0, RAX(%rsp) movl $0, RAX(%rsp)
@@ -569,7 +567,7 @@ END(irq_entries_start)
/* this goes to 0(%rsp) for unwinder, not for saving the value: */ /* this goes to 0(%rsp) for unwinder, not for saving the value: */
SAVE_EXTRA_REGS_RBP -RBP SAVE_EXTRA_REGS_RBP -RBP
leaq -RBP(%rsp),%rdi /* arg1 for \func (pointer to pt_regs) */ leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */
testb $3, CS-RBP(%rsp) testb $3, CS-RBP(%rsp)
jz 1f jz 1f
@@ -584,7 +582,7 @@ END(irq_entries_start)
*/ */
movq %rsp, %rsi movq %rsp, %rsi
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
pushq %rsi pushq %rsi
/* We entered an interrupt context - irqs are off: */ /* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF TRACE_IRQS_OFF
@@ -599,7 +597,7 @@ END(irq_entries_start)
.p2align CONFIG_X86_L1_CACHE_SHIFT .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt: common_interrupt:
ASM_CLAC ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
interrupt do_IRQ interrupt do_IRQ
/* 0(%rsp): old RSP */ /* 0(%rsp): old RSP */
ret_from_intr: ret_from_intr:
@@ -610,22 +608,21 @@ ret_from_intr:
/* Restore saved previous stack */ /* Restore saved previous stack */
popq %rsi popq %rsi
/* return code expects complete pt_regs - adjust rsp accordingly: */ /* return code expects complete pt_regs - adjust rsp accordingly: */
leaq -RBP(%rsi),%rsp leaq -RBP(%rsi), %rsp
testb $3, CS(%rsp) testb $3, CS(%rsp)
jz retint_kernel jz retint_kernel
/* Interrupt came from user space */ /* Interrupt came from user space */
retint_user: retint_user:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
/*
* %rcx: thread info. Interrupts off. /* %rcx: thread info. Interrupts are off. */
*/
retint_with_reschedule: retint_with_reschedule:
movl $_TIF_WORK_MASK,%edi movl $_TIF_WORK_MASK, %edi
retint_check: retint_check:
LOCKDEP_SYS_EXIT_IRQ LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx movl TI_flags(%rcx), %edx
andl %edi,%edx andl %edi, %edx
jnz retint_careful jnz retint_careful
retint_swapgs: /* return to user-space */ retint_swapgs: /* return to user-space */
@@ -643,9 +640,9 @@ retint_kernel:
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
/* Interrupts are off */ /* Interrupts are off */
/* Check if we need preemption */ /* Check if we need preemption */
bt $9,EFLAGS(%rsp) /* interrupts were off? */ bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f jnc 1f
0: cmpl $0,PER_CPU_VAR(__preempt_count) 0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f jnz 1f
call preempt_schedule_irq call preempt_schedule_irq
jmp 0b jmp 0b
@@ -671,7 +668,7 @@ ENTRY(native_iret)
* 64-bit mode SS:RSP on the exception stack is always valid. * 64-bit mode SS:RSP on the exception stack is always valid.
*/ */
#ifdef CONFIG_X86_ESPFIX64 #ifdef CONFIG_X86_ESPFIX64
testb $4,(SS-RIP)(%rsp) testb $4, (SS-RIP)(%rsp)
jnz native_irq_return_ldt jnz native_irq_return_ldt
#endif #endif
@@ -690,30 +687,30 @@ native_irq_return_ldt:
pushq %rax pushq %rax
pushq %rdi pushq %rdi
SWAPGS SWAPGS
movq PER_CPU_VAR(espfix_waddr),%rdi movq PER_CPU_VAR(espfix_waddr), %rdi
movq %rax,(0*8)(%rdi) /* RAX */ movq %rax, (0*8)(%rdi) /* RAX */
movq (2*8)(%rsp),%rax /* RIP */ movq (2*8)(%rsp), %rax /* RIP */
movq %rax,(1*8)(%rdi) movq %rax, (1*8)(%rdi)
movq (3*8)(%rsp),%rax /* CS */ movq (3*8)(%rsp), %rax /* CS */
movq %rax,(2*8)(%rdi) movq %rax, (2*8)(%rdi)
movq (4*8)(%rsp),%rax /* RFLAGS */ movq (4*8)(%rsp), %rax /* RFLAGS */
movq %rax,(3*8)(%rdi) movq %rax, (3*8)(%rdi)
movq (6*8)(%rsp),%rax /* SS */ movq (6*8)(%rsp), %rax /* SS */
movq %rax,(5*8)(%rdi) movq %rax, (5*8)(%rdi)
movq (5*8)(%rsp),%rax /* RSP */ movq (5*8)(%rsp), %rax /* RSP */
movq %rax,(4*8)(%rdi) movq %rax, (4*8)(%rdi)
andl $0xffff0000,%eax andl $0xffff0000, %eax
popq %rdi popq %rdi
orq PER_CPU_VAR(espfix_stack),%rax orq PER_CPU_VAR(espfix_stack), %rax
SWAPGS SWAPGS
movq %rax,%rsp movq %rax, %rsp
popq %rax popq %rax
jmp native_irq_return_iret jmp native_irq_return_iret
#endif #endif
/* edi: workmask, edx: work */ /* edi: workmask, edx: work */
retint_careful: retint_careful:
bt $TIF_NEED_RESCHED,%edx bt $TIF_NEED_RESCHED, %edx
jnc retint_signal jnc retint_signal
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
@@ -726,14 +723,14 @@ retint_careful:
jmp retint_check jmp retint_check
retint_signal: retint_signal:
testl $_TIF_DO_NOTIFY_MASK,%edx testl $_TIF_DO_NOTIFY_MASK, %edx
jz retint_swapgs jz retint_swapgs
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
movq $-1,ORIG_RAX(%rsp) movq $-1, ORIG_RAX(%rsp)
xorl %esi,%esi # oldset xorl %esi, %esi /* oldset */
movq %rsp,%rdi # &pt_regs movq %rsp, %rdi /* &pt_regs */
call do_notify_resume call do_notify_resume
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
@@ -774,60 +771,45 @@ trace_apicinterrupt \num \sym
.endm .endm
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \ apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
apicinterrupt3 REBOOT_VECTOR \
reboot_interrupt smp_reboot_interrupt
#endif #endif
#ifdef CONFIG_X86_UV #ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE \ apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt
uv_bau_message_intr1 uv_bau_message_interrupt
#endif #endif
apicinterrupt LOCAL_TIMER_VECTOR \
apic_timer_interrupt smp_apic_timer_interrupt apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \ apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_HAVE_KVM #ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR \ apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
kvm_posted_intr_ipi smp_kvm_posted_intr_ipi apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR \
kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
#endif #endif
#ifdef CONFIG_X86_MCE_THRESHOLD #ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR \ apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt
threshold_interrupt smp_threshold_interrupt
#endif #endif
#ifdef CONFIG_X86_MCE_AMD #ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR \ apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt
deferred_error_interrupt smp_deferred_error_interrupt
#endif #endif
#ifdef CONFIG_X86_THERMAL_VECTOR #ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR \ apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt
thermal_interrupt smp_thermal_interrupt
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt
call_function_single_interrupt smp_call_function_single_interrupt apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \ apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt
call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
reschedule_interrupt smp_reschedule_interrupt
#endif #endif
apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt
error_interrupt smp_error_interrupt apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
spurious_interrupt smp_spurious_interrupt
#ifdef CONFIG_IRQ_WORK #ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
irq_work_interrupt smp_irq_work_interrupt
#endif #endif
/* /*
@@ -853,8 +835,8 @@ ENTRY(\sym)
.if \paranoid .if \paranoid
.if \paranoid == 1 .if \paranoid == 1
testb $3, CS(%rsp) /* If coming from userspace, switch */ testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
jnz 1f /* stacks. */ jnz 1f
.endif .endif
call paranoid_entry call paranoid_entry
.else .else
@@ -870,13 +852,13 @@ ENTRY(\sym)
.endif .endif
.endif .endif
movq %rsp,%rdi /* pt_regs pointer */ movq %rsp, %rdi /* pt_regs pointer */
.if \has_error_code .if \has_error_code
movq ORIG_RAX(%rsp),%rsi /* get error code */ movq ORIG_RAX(%rsp), %rsi /* get error code */
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
.else .else
xorl %esi,%esi /* no error code */ xorl %esi, %esi /* no error code */
.endif .endif
.if \shift_ist != -1 .if \shift_ist != -1
@@ -906,17 +888,17 @@ ENTRY(\sym)
call error_entry call error_entry
movq %rsp,%rdi /* pt_regs pointer */ movq %rsp, %rdi /* pt_regs pointer */
call sync_regs call sync_regs
movq %rax,%rsp /* switch stack */ movq %rax, %rsp /* switch stack */
movq %rsp,%rdi /* pt_regs pointer */ movq %rsp, %rdi /* pt_regs pointer */
.if \has_error_code .if \has_error_code
movq ORIG_RAX(%rsp),%rsi /* get error code */ movq ORIG_RAX(%rsp), %rsi /* get error code */
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
.else .else
xorl %esi,%esi /* no error code */ xorl %esi, %esi /* no error code */
.endif .endif
call \do_sym call \do_sym
@@ -952,37 +934,39 @@ idtentry alignment_check do_alignment_check has_error_code=1
idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
/* Reload gs selector with exception handling */ /*
/* edi: new selector */ * Reload gs selector with exception handling
* edi: new selector
*/
ENTRY(native_load_gs_index) ENTRY(native_load_gs_index)
pushfq pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS SWAPGS
gs_change: gs_change:
movl %edi,%gs movl %edi, %gs
2: mfence /* workaround */ 2: mfence /* workaround */
SWAPGS SWAPGS
popfq popfq
ret ret
END(native_load_gs_index) END(native_load_gs_index)
_ASM_EXTABLE(gs_change,bad_gs) _ASM_EXTABLE(gs_change, bad_gs)
.section .fixup,"ax" .section .fixup, "ax"
/* running with kernelgs */ /* running with kernelgs */
bad_gs: bad_gs:
SWAPGS /* switch back to user gs */ SWAPGS /* switch back to user gs */
xorl %eax,%eax xorl %eax, %eax
movl %eax,%gs movl %eax, %gs
jmp 2b jmp 2b
.previous .previous
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack) ENTRY(do_softirq_own_stack)
pushq %rbp pushq %rbp
mov %rsp,%rbp mov %rsp, %rbp
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmove PER_CPU_VAR(irq_stack_ptr),%rsp cmove PER_CPU_VAR(irq_stack_ptr), %rsp
push %rbp # backlink for old unwinder push %rbp /* frame pointer backlink */
call __do_softirq call __do_softirq
leaveq leaveq
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
@@ -1005,16 +989,17 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* existing activation in its critical region -- if so, we pop the current * existing activation in its critical region -- if so, we pop the current
* activation and restart the handler using the previous one. * activation and restart the handler using the previous one.
*/ */
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
/* /*
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
* see the correct pointer to the pt_regs * see the correct pointer to the pt_regs
*/ */
movq %rdi, %rsp # we don't return, adjust the stack frame movq %rdi, %rsp /* we don't return, adjust the stack frame */
11: incl PER_CPU_VAR(irq_count) 11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp movq %rsp, %rbp
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
pushq %rbp # backlink for old unwinder pushq %rbp /* frame pointer backlink */
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
popq %rsp popq %rsp
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
@@ -1038,30 +1023,30 @@ END(xen_do_hypervisor_callback)
* with its current contents: any discrepancy means we in category 1. * with its current contents: any discrepancy means we in category 1.
*/ */
ENTRY(xen_failsafe_callback) ENTRY(xen_failsafe_callback)
movl %ds,%ecx movl %ds, %ecx
cmpw %cx,0x10(%rsp) cmpw %cx, 0x10(%rsp)
jne 1f jne 1f
movl %es,%ecx movl %es, %ecx
cmpw %cx,0x18(%rsp) cmpw %cx, 0x18(%rsp)
jne 1f jne 1f
movl %fs,%ecx movl %fs, %ecx
cmpw %cx,0x20(%rsp) cmpw %cx, 0x20(%rsp)
jne 1f jne 1f
movl %gs,%ecx movl %gs, %ecx
cmpw %cx,0x28(%rsp) cmpw %cx, 0x28(%rsp)
jne 1f jne 1f
/* All segments match their saved values => Category 2 (Bad IRET). */ /* All segments match their saved values => Category 2 (Bad IRET). */
movq (%rsp),%rcx movq (%rsp), %rcx
movq 8(%rsp),%r11 movq 8(%rsp), %r11
addq $0x30,%rsp addq $0x30, %rsp
pushq $0 /* RIP */ pushq $0 /* RIP */
pushq %r11 pushq %r11
pushq %rcx pushq %rcx
jmp general_protection jmp general_protection
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp),%rcx movq (%rsp), %rcx
movq 8(%rsp),%r11 movq 8(%rsp), %r11
addq $0x30,%rsp addq $0x30, %rsp
pushq $-1 /* orig_ax = -1 => not a system call */ pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS SAVE_C_REGS
@@ -1082,16 +1067,20 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment do_stack_segment has_error_code=1 idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
idtentry xen_debug do_debug has_error_code=0 idtentry xen_debug do_debug has_error_code=0
idtentry xen_int3 do_int3 has_error_code=0 idtentry xen_int3 do_int3 has_error_code=0
idtentry xen_stack_segment do_stack_segment has_error_code=1 idtentry xen_stack_segment do_stack_segment has_error_code=1
#endif #endif
idtentry general_protection do_general_protection has_error_code=1 idtentry general_protection do_general_protection has_error_code=1
trace_idtentry page_fault do_page_fault has_error_code=1 trace_idtentry page_fault do_page_fault has_error_code=1
#ifdef CONFIG_KVM_GUEST #ifdef CONFIG_KVM_GUEST
idtentry async_page_fault do_async_page_fault has_error_code=1 idtentry async_page_fault do_async_page_fault has_error_code=1
#endif #endif
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
#endif #endif
@@ -1105,13 +1094,13 @@ ENTRY(paranoid_entry)
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
movl $1,%ebx movl $1, %ebx
movl $MSR_GS_BASE,%ecx movl $MSR_GS_BASE, %ecx
rdmsr rdmsr
testl %edx,%edx testl %edx, %edx
js 1f /* negative -> in kernel */ js 1f /* negative -> in kernel */
SWAPGS SWAPGS
xorl %ebx,%ebx xorl %ebx, %ebx
1: ret 1: ret
END(paranoid_entry) END(paranoid_entry)
@@ -1124,12 +1113,13 @@ END(paranoid_entry)
* in syscall entry), so checking for preemption here would * in syscall entry), so checking for preemption here would
* be complicated. Fortunately, we there's no good reason * be complicated. Fortunately, we there's no good reason
* to try to handle preemption here. * to try to handle preemption here.
*
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*/ */
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(paranoid_exit) ENTRY(paranoid_exit)
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */ testl %ebx, %ebx /* swapgs needed? */
jnz paranoid_exit_no_swapgs jnz paranoid_exit_no_swapgs
TRACE_IRQS_IRETQ TRACE_IRQS_IRETQ
SWAPGS_UNSAFE_STACK SWAPGS_UNSAFE_STACK
@@ -1151,7 +1141,7 @@ ENTRY(error_entry)
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
xorl %ebx,%ebx xorl %ebx, %ebx
testb $3, CS+8(%rsp) testb $3, CS+8(%rsp)
jz error_kernelspace jz error_kernelspace
error_swapgs: error_swapgs:
@@ -1168,26 +1158,26 @@ error_sti:
*/ */
error_kernelspace: error_kernelspace:
incl %ebx incl %ebx
leaq native_irq_return_iret(%rip),%rcx leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx,RIP+8(%rsp) cmpq %rcx, RIP+8(%rsp)
je error_bad_iret je error_bad_iret
movl %ecx,%eax /* zero extend */ movl %ecx, %eax /* zero extend */
cmpq %rax,RIP+8(%rsp) cmpq %rax, RIP+8(%rsp)
je bstep_iret je bstep_iret
cmpq $gs_change,RIP+8(%rsp) cmpq $gs_change, RIP+8(%rsp)
je error_swapgs je error_swapgs
jmp error_sti jmp error_sti
bstep_iret: bstep_iret:
/* Fix truncated RIP */ /* Fix truncated RIP */
movq %rcx,RIP+8(%rsp) movq %rcx, RIP+8(%rsp)
/* fall through */ /* fall through */
error_bad_iret: error_bad_iret:
SWAPGS SWAPGS
mov %rsp,%rdi mov %rsp, %rdi
call fixup_bad_iret call fixup_bad_iret
mov %rax,%rsp mov %rax, %rsp
decl %ebx /* Return to usergs */ decl %ebx /* Return to usergs */
jmp error_sti jmp error_sti
END(error_entry) END(error_entry)
@@ -1195,11 +1185,11 @@ END(error_entry)
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit) ENTRY(error_exit)
movl %ebx,%eax movl %ebx, %eax
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
testl %eax,%eax testl %eax, %eax
jnz retint_kernel jnz retint_kernel
jmp retint_user jmp retint_user
END(error_exit) END(error_exit)
@@ -1268,6 +1258,7 @@ ENTRY(nmi)
cmpq %rdx, 4*8(%rsp) cmpq %rdx, 4*8(%rsp)
/* If the stack pointer is above the NMI stack, this is a normal NMI */ /* If the stack pointer is above the NMI stack, this is a normal NMI */
ja first_nmi ja first_nmi
subq $EXCEPTION_STKSZ, %rdx subq $EXCEPTION_STKSZ, %rdx
cmpq %rdx, 4*8(%rsp) cmpq %rdx, 4*8(%rsp)
/* If it is below the NMI stack, it is a normal NMI */ /* If it is below the NMI stack, it is a normal NMI */
@@ -1349,9 +1340,7 @@ first_nmi:
/* Set the NMI executing variable on the stack. */ /* Set the NMI executing variable on the stack. */
pushq $1 pushq $1
/* /* Leave room for the "copied" frame */
* Leave room for the "copied" frame
*/
subq $(5*8), %rsp subq $(5*8), %rsp
/* Copy the stack frame to the Saved frame */ /* Copy the stack frame to the Saved frame */
@@ -1415,8 +1404,8 @@ end_repeat_nmi:
movq %cr2, %r12 movq %cr2, %r12
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi movq %rsp, %rdi
movq $-1,%rsi movq $-1, %rsi
call do_nmi call do_nmi
/* Did the NMI take a page fault? Restore cr2 if it did */ /* Did the NMI take a page fault? Restore cr2 if it did */
@@ -1425,7 +1414,7 @@ end_repeat_nmi:
je 1f je 1f
movq %r12, %cr2 movq %r12, %cr2
1: 1:
testl %ebx,%ebx /* swapgs needed? */ testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore jnz nmi_restore
nmi_swapgs: nmi_swapgs:
SWAPGS_UNSAFE_STACK SWAPGS_UNSAFE_STACK
@@ -1441,7 +1430,6 @@ nmi_restore:
END(nmi) END(nmi)
ENTRY(ignore_sysret) ENTRY(ignore_sysret)
mov $-ENOSYS,%eax mov $-ENOSYS, %eax
sysret sysret
END(ignore_sysret) END(ignore_sysret)