Commit 62610913 authored by Jan Beulich, committed by Steven Rostedt

x86-64: Fix CFI annotations for NMI nesting code

The saving and restoring of %rdx wasn't annotated at all, and the
jumping over sections where state gets partly restored wasn't handled
either.

Further, by folding the pushing of the previous frame in repeat_nmi
into that which so far was immediately preceding restart_nmi (after
moving the restore of %rdx ahead of that, since it doesn't get used
anymore when pushing prior frames), annotations of the replicated
frame creations can be made consistent too.

v2: Fully fold repeat_nmi into the normal code flow (adding a single
    redundant instruction to the "normal" code path), thus retaining
    the special protection of all instructions between repeat_nmi and
    end_repeat_nmi.

Link: http://lkml.kernel.org/r/4F478B630200007800074A31@nat28.tlf.novell.com
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Parent a38449ef
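As background for the annotations this patch adds, here is a minimal standalone sketch, not kernel code: the kernel's CFI_* macros from dwarf2.h expand (roughly) to the corresponding GNU as .cfi_* directives when DWARF unwind annotations are enabled. The hypothetical routine cfi_demo below shows the two patterns the commit message refers to: a register save annotated with .cfi_rel_offset/.cfi_restore (what pushq_cfi plus CFI_REL_OFFSET and CFI_RESTORE express), and .cfi_remember_state/.cfi_restore_state bracketing an early-exit block so that code reached by jumping over that block keeps the annotations from the branch point, the way first_nmi does relative to nested_nmi_out.

	# Minimal sketch (hypothetical cfi_demo routine), assembled with GNU as.
	# It mirrors the annotation pattern the patch introduces around the
	# nested_nmi_out early exit; it is not taken from entry_64.S.
	.text
	.globl	cfi_demo
	.type	cfi_demo, @function
cfi_demo:
	.cfi_startproc
	pushq	%rdx			# save the scratch register
	.cfi_adjust_cfa_offset 8	# what pushq_cfi adds: CFA moved by 8
	.cfi_rel_offset rdx, 0		# saved %rdx now lives at offset 0 from %rsp

	testl	%edi, %edi
	jnz	1f			# taken branch skips the early-exit block
	.cfi_remember_state		# snapshot: %rdx still saved on the stack

	popq	%rdx			# early-exit path restores %rdx...
	.cfi_adjust_cfa_offset -8
	.cfi_restore rdx		# ...so tell the unwinder it is back in place
	xorl	%eax, %eax
	ret

	.cfi_restore_state		# code below is reached from before the pop,
1:					# so %rdx is still on the stack here
	movl	$1, %eax
	popq	%rdx
	.cfi_adjust_cfa_offset -8
	.cfi_restore rdx
	ret
	.cfi_endproc
	.size	cfi_demo, .-cfi_demo

Without the remember/restore pair, the unwinder would believe %rdx had already been popped when unwinding through the code at 1:, which is the class of inaccuracy the patch fixes for first_nmi and the replicated NMI stack frames.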
@@ -1530,6 +1530,7 @@ ENTRY(nmi)
 
 	/* Use %rdx as out temp variable throughout */
 	pushq_cfi %rdx
+	CFI_REL_OFFSET rdx, 0
 
 	/*
 	 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1554,6 +1555,7 @@ ENTRY(nmi)
 	 */
 	lea 6*8(%rsp), %rdx
 	test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+	CFI_REMEMBER_STATE
 
 nested_nmi:
 	/*
@@ -1585,10 +1587,12 @@ nested_nmi:
 
 nested_nmi_out:
 	popq_cfi %rdx
+	CFI_RESTORE rdx
 
 	/* No need to check faults here */
 	INTERRUPT_RETURN
 
+	CFI_RESTORE_STATE
 first_nmi:
 	/*
 	 * Because nested NMIs will use the pushed location that we
@@ -1624,6 +1628,10 @@ first_nmi:
 	 * NMI may zero out. The original stack frame and the temp storage
 	 * is also used by nested NMIs and can not be trusted on exit.
 	 */
+	/* Do not pop rdx, nested NMIs will corrupt it */
+	movq (%rsp), %rdx
+	CFI_RESTORE rdx
+
 	/* Set the NMI executing variable on the stack. */
 	pushq_cfi $1
 
@@ -1631,14 +1639,31 @@ first_nmi:
 	.rept 5
 	pushq_cfi 6*8(%rsp)
 	.endr
+	CFI_DEF_CFA_OFFSET SS+8-RIP
+
+	/*
+	 * If there was a nested NMI, the first NMI's iret will return
+	 * here. But NMIs are still enabled and we can take another
+	 * nested NMI. The nested NMI checks the interrupted RIP to see
+	 * if it is between repeat_nmi and end_repeat_nmi, and if so
+	 * it will just return, as we are about to repeat an NMI anyway.
+	 * This makes it safe to copy to the stack frame that a nested
+	 * NMI will update.
+	 */
+repeat_nmi:
+	/*
+	 * Update the stack variable to say we are still in NMI (the update
+	 * is benign for the non-repeat case, where 1 was pushed just above
+	 * to this very stack slot).
+	 */
+	movq $1, 5*8(%rsp)
 
 	/* Make another copy, this one may be modified by nested NMIs */
 	.rept 5
 	pushq_cfi 4*8(%rsp)
 	.endr
-
-	/* Do not pop rdx, nested NMIs will corrupt it */
-	movq 11*8(%rsp), %rdx
+	CFI_DEF_CFA_OFFSET SS+8-RIP
+end_repeat_nmi:
 
 	/*
 	 * Everything below this point can be preempted by a nested
@@ -1646,7 +1671,6 @@ first_nmi:
 	 * caused by an exception and nested NMI will start here, and
 	 * can still be preempted by another NMI.
 	 */
-restart_nmi:
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1675,26 +1699,6 @@ nmi_restore:
 	CFI_ENDPROC
 END(nmi)
 
-	/*
-	 * If an NMI hit an iret because of an exception or breakpoint,
-	 * it can lose its NMI context, and a nested NMI may come in.
-	 * In that case, the nested NMI will change the preempted NMI's
-	 * stack to jump to here when it does the final iret.
-	 */
-repeat_nmi:
-	INTR_FRAME
-	/* Update the stack variable to say we are still in NMI */
-	movq $1, 5*8(%rsp)
-
-	/* copy the saved stack back to copy stack */
-	.rept 5
-	pushq_cfi 4*8(%rsp)
-	.endr
-
-	jmp restart_nmi
-	CFI_ENDPROC
-end_repeat_nmi:
-
 ENTRY(ignore_sysret)
 	CFI_STARTPROC
 	mov	$-ENOSYS,%eax