Commit 8c1f7558 authored by Josh Poimboeuf, committed by Ingo Molnar

x86/entry/64: Add unwind hint annotations

Add unwind hint annotations to entry_64.S.  This will enable the ORC
unwinder to unwind through any location in the entry code including
syscalls, interrupts, and exceptions.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/b9f6d478aadf68ba57c739dcfac34ec0dc021c4c.1499786555.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 39358a03
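As a quick illustration of what these annotations do (a sketch distilled from the idtentry and error_entry changes in the diff below, not a verbatim excerpt): at an exception entry point only the hardware iret frame (SS, RSP, RFLAGS, CS, RIP and, for some vectors, an error code) is on the stack, so the entry is marked with UNWIND_HINT_IRET_REGS; once the remaining registers have been saved into pt_regs, UNWIND_HINT_REGS describes the frame instead, letting the ORC unwinder recover the caller's stack from either point.

    ENTRY(\sym)
        UNWIND_HINT_IRET_REGS offset=8   /* only the hardware frame (plus error code) exists yet */
        ...
        call error_entry                 /* saves the remaining registers into pt_regs */
        UNWIND_HINT_REGS                 /* a full pt_regs now sits at the top of the stack */
        ...
    END(\sym)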
arch/x86/entry/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the x86 low level entry code
 #
-OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
arch/x86/entry/calling.h
 #include <linux/jump_label.h>
+#include <asm/unwind_hints.h>
 /*
@@ -112,6 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
 movq %rdx, 12*8+\offset(%rsp)
 movq %rsi, 13*8+\offset(%rsp)
 movq %rdi, 14*8+\offset(%rsp)
+UNWIND_HINT_REGS offset=\offset extra=0
 .endm
 .macro SAVE_C_REGS offset=0
 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -136,6 +138,7 @@ For 32-bit we have the following conventions - kernel is built with
 movq %r12, 3*8+\offset(%rsp)
 movq %rbp, 4*8+\offset(%rsp)
 movq %rbx, 5*8+\offset(%rsp)
+UNWIND_HINT_REGS offset=\offset
 .endm
 .macro RESTORE_EXTRA_REGS offset=0
@@ -145,6 +148,7 @@ For 32-bit we have the following conventions - kernel is built with
 movq 3*8+\offset(%rsp), %r12
 movq 4*8+\offset(%rsp), %rbp
 movq 5*8+\offset(%rsp), %rbx
+UNWIND_HINT_REGS offset=\offset extra=0
 .endm
 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
@@ -167,6 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endif
 movq 13*8(%rsp), %rsi
 movq 14*8(%rsp), %rdi
+UNWIND_HINT_IRET_REGS offset=16*8
 .endm
 .macro RESTORE_C_REGS
 RESTORE_C_REGS_HELPER 1,1,1,1,1
arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>
 .code64
@@ -43,9 +44,10 @@
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
+UNWIND_HINT_EMPTY
 swapgs
 sysretq
-ENDPROC(native_usergs_sysret64)
+END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 .macro TRACE_IRQS_IRETQ
@@ -134,6 +136,7 @@ ENDPROC(native_usergs_sysret64)
 */
 ENTRY(entry_SYSCALL_64)
+UNWIND_HINT_EMPTY
 /*
 * Interrupts are off on entry.
 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -169,6 +172,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
 pushq %r10 /* pt_regs->r10 */
 pushq %r11 /* pt_regs->r11 */
 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+UNWIND_HINT_REGS extra=0
 /*
 * If we need to do entry work or if we guess we'll need to do
@@ -223,6 +227,7 @@ entry_SYSCALL_64_fastpath:
 movq EFLAGS(%rsp), %r11
 RESTORE_C_REGS_EXCEPT_RCX_R11
 movq RSP(%rsp), %rsp
+UNWIND_HINT_EMPTY
 USERGS_SYSRET64
 1:
@@ -316,6 +321,7 @@ syscall_return_via_sysret:
 /* rcx and r11 are already restored (see code above) */
 RESTORE_C_REGS_EXCEPT_RCX_R11
 movq RSP(%rsp), %rsp
+UNWIND_HINT_EMPTY
 USERGS_SYSRET64
 opportunistic_sysret_failed:
@@ -343,6 +349,7 @@ ENTRY(stub_ptregs_64)
 DISABLE_INTERRUPTS(CLBR_ANY)
 TRACE_IRQS_OFF
 popq %rax
+UNWIND_HINT_REGS extra=0
 jmp entry_SYSCALL64_slow_path
 1:
@@ -351,6 +358,7 @@ END(stub_ptregs_64)
 .macro ptregs_stub func
 ENTRY(ptregs_\func)
+UNWIND_HINT_FUNC
 leaq \func(%rip), %rax
 jmp stub_ptregs_64
 END(ptregs_\func)
@@ -367,6 +375,7 @@ END(ptregs_\func)
 * %rsi: next task
 */
 ENTRY(__switch_to_asm)
+UNWIND_HINT_FUNC
 /*
 * Save callee-saved registers
 * This must match the order in inactive_task_frame
@@ -406,6 +415,7 @@ END(__switch_to_asm)
 * r12: kernel thread arg
 */
 ENTRY(ret_from_fork)
+UNWIND_HINT_EMPTY
 movq %rax, %rdi
 call schedule_tail /* rdi: 'prev' task parameter */
@@ -413,6 +423,7 @@ ENTRY(ret_from_fork)
 jnz 1f /* kernel threads are uncommon */
 2:
+UNWIND_HINT_REGS
 movq %rsp, %rdi
 call syscall_return_slowpath /* returns with IRQs disabled */
 TRACE_IRQS_ON /* user mode is traced as IRQS on */
@@ -440,10 +451,11 @@ END(ret_from_fork)
 ENTRY(irq_entries_start)
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+UNWIND_HINT_IRET_REGS
 pushq $(~vector+0x80) /* Note: always in signed byte range */
-vector=vector+1
 jmp common_interrupt
 .align 8
+vector=vector+1
 .endr
 END(irq_entries_start)
@@ -465,9 +477,14 @@ END(irq_entries_start)
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
-.macro ENTER_IRQ_STACK old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp
 DEBUG_ENTRY_ASSERT_IRQS_OFF
 movq %rsp, \old_rsp
+.if \regs
+UNWIND_HINT_REGS base=\old_rsp
+.endif
 incl PER_CPU_VAR(irq_count)
 jnz .Lirq_stack_push_old_rsp_\@
@@ -504,16 +521,24 @@ END(irq_entries_start)
 .Lirq_stack_push_old_rsp_\@:
 pushq \old_rsp
+.if \regs
+UNWIND_HINT_REGS indirect=1
+.endif
 .endm
 /*
 * Undoes ENTER_IRQ_STACK.
 */
-.macro LEAVE_IRQ_STACK
+.macro LEAVE_IRQ_STACK regs=1
 DEBUG_ENTRY_ASSERT_IRQS_OFF
 /* We need to be off the IRQ stack before decrementing irq_count. */
 popq %rsp
+.if \regs
+UNWIND_HINT_REGS
+.endif
 /*
 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
 * the irq stack but we're not on it.
@@ -624,6 +649,7 @@ restore_c_regs_and_iret:
 INTERRUPT_RETURN
 ENTRY(native_iret)
+UNWIND_HINT_IRET_REGS
 /*
 * Are we returning to a stack segment from the LDT? Note: in
 * 64-bit mode SS:RSP on the exception stack is always valid.
@@ -696,6 +722,7 @@ native_irq_return_ldt:
 orq PER_CPU_VAR(espfix_stack), %rax
 SWAPGS
 movq %rax, %rsp
+UNWIND_HINT_IRET_REGS offset=8
 /*
 * At this point, we cannot write to the stack any more, but we can
@@ -717,6 +744,7 @@ END(common_interrupt)
 */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
+UNWIND_HINT_IRET_REGS
 ASM_CLAC
 pushq $~(\num)
 .Lcommon_\sym:
@@ -802,6 +830,8 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
+UNWIND_HINT_IRET_REGS offset=8
 /* Sanity check */
 .if \shift_ist != -1 && \paranoid == 0
 .error "using shift_ist requires paranoid=1"
@@ -825,6 +855,7 @@ ENTRY(\sym)
 .else
 call error_entry
 .endif
+UNWIND_HINT_REGS
 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 .if \paranoid
@@ -922,6 +953,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
 * edi: new selector
 */
 ENTRY(native_load_gs_index)
+FRAME_BEGIN
 pushfq
 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 SWAPGS
@@ -930,8 +962,9 @@ ENTRY(native_load_gs_index)
 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 SWAPGS
 popfq
+FRAME_END
 ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 _ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -954,12 +987,12 @@ bad_gs:
 ENTRY(do_softirq_own_stack)
 pushq %rbp
 mov %rsp, %rbp
-ENTER_IRQ_STACK old_rsp=%r11
+ENTER_IRQ_STACK regs=0 old_rsp=%r11
 call __do_softirq
-LEAVE_IRQ_STACK
+LEAVE_IRQ_STACK regs=0
 leaveq
 ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -983,7 +1016,9 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
+UNWIND_HINT_FUNC
 movq %rdi, %rsp /* we don't return, adjust the stack frame */
+UNWIND_HINT_REGS
 ENTER_IRQ_STACK old_rsp=%r10
 call xen_evtchn_do_upcall
@@ -1009,6 +1044,7 @@ END(xen_do_hypervisor_callback)
 * with its current contents: any discrepancy means we in category 1.
 */
 ENTRY(xen_failsafe_callback)
+UNWIND_HINT_EMPTY
 movl %ds, %ecx
 cmpw %cx, 0x10(%rsp)
 jne 1f
@@ -1028,11 +1064,13 @@ ENTRY(xen_failsafe_callback)
 pushq $0 /* RIP */
 pushq %r11
 pushq %rcx
+UNWIND_HINT_IRET_REGS offset=8
 jmp general_protection
 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 movq (%rsp), %rcx
 movq 8(%rsp), %r11
 addq $0x30, %rsp
+UNWIND_HINT_IRET_REGS
 pushq $-1 /* orig_ax = -1 => not a system call */
 ALLOC_PT_GPREGS_ON_STACK
 SAVE_C_REGS
@@ -1078,6 +1116,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
 ENTRY(paranoid_entry)
+UNWIND_HINT_FUNC
 cld
 SAVE_C_REGS 8
 SAVE_EXTRA_REGS 8
@@ -1105,6 +1144,7 @@ END(paranoid_entry)
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
 ENTRY(paranoid_exit)
+UNWIND_HINT_REGS
 DISABLE_INTERRUPTS(CLBR_ANY)
 TRACE_IRQS_OFF_DEBUG
 testl %ebx, %ebx /* swapgs needed? */
@@ -1126,6 +1166,7 @@ END(paranoid_exit)
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
 ENTRY(error_entry)
+UNWIND_HINT_FUNC
 cld
 SAVE_C_REGS 8
 SAVE_EXTRA_REGS 8
@@ -1210,6 +1251,7 @@ END(error_entry)
 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
 ENTRY(error_exit)
+UNWIND_HINT_REGS
 DISABLE_INTERRUPTS(CLBR_ANY)
 TRACE_IRQS_OFF
 testl %ebx, %ebx
@@ -1219,6 +1261,7 @@ END(error_exit)
 /* Runs on exception stack */
 ENTRY(nmi)
+UNWIND_HINT_IRET_REGS
 /*
 * Fix up the exception frame if we're on Xen.
 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
@@ -1290,11 +1333,13 @@ ENTRY(nmi)
 cld
 movq %rsp, %rdx
 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+UNWIND_HINT_IRET_REGS base=%rdx offset=8
 pushq 5*8(%rdx) /* pt_regs->ss */
 pushq 4*8(%rdx) /* pt_regs->rsp */
 pushq 3*8(%rdx) /* pt_regs->flags */
 pushq 2*8(%rdx) /* pt_regs->cs */
 pushq 1*8(%rdx) /* pt_regs->rip */
+UNWIND_HINT_IRET_REGS
 pushq $-1 /* pt_regs->orig_ax */
 pushq %rdi /* pt_regs->di */
 pushq %rsi /* pt_regs->si */
@@ -1311,6 +1356,7 @@ ENTRY(nmi)
 pushq %r13 /* pt_regs->r13 */
 pushq %r14 /* pt_regs->r14 */
 pushq %r15 /* pt_regs->r15 */
+UNWIND_HINT_REGS
 ENCODE_FRAME_POINTER
 /*
@@ -1465,6 +1511,7 @@ first_nmi:
 .rept 5
 pushq 11*8(%rsp)
 .endr
+UNWIND_HINT_IRET_REGS
 /* Everything up to here is safe from nested NMIs */
@@ -1480,6 +1527,7 @@ first_nmi:
 pushq $__KERNEL_CS /* CS */
 pushq $1f /* RIP */
 INTERRUPT_RETURN /* continues at repeat_nmi below */
+UNWIND_HINT_IRET_REGS
 1:
 #endif
@@ -1529,6 +1577,7 @@ end_repeat_nmi:
 * exceptions might do.
 */
 call paranoid_entry
+UNWIND_HINT_REGS
 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 movq %rsp, %rdi
@@ -1566,17 +1615,19 @@ nmi_restore:
 END(nmi)
 ENTRY(ignore_sysret)
+UNWIND_HINT_EMPTY
 mov $-ENOSYS, %eax
 sysret
 END(ignore_sysret)
 ENTRY(rewind_stack_do_exit)
+UNWIND_HINT_FUNC
 /* Prevent any naive code from trying to unwind to our caller. */
 xorl %ebp, %ebp
 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
-leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+leaq -PTREGS_SIZE(%rax), %rsp
+UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
 call do_exit
-1: jmp 1b
 END(rewind_stack_do_exit)