Commit ecb586bd authored by Paolo Bonzini, committed by Ingo Molnar

KVM/x86: Remove indirect MSR op calls from SPEC_CTRL

Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl() on the vmentry path too.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: d28b387f
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: d5028ba8
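The change hinges on the difference between the paravirt MSR helpers and their native counterparts: under CONFIG_PARAVIRT, wrmsrl() can dispatch through a pv-ops function pointer — exactly the kind of indirect branch IBRS is meant to fence off — while native_wrmsrl() emits the WRMSR instruction directly. A minimal sketch of that distinction, using hypothetical stand-in names rather than the kernel's real definitions:

#include <linux/types.h>

/* Hypothetical stand-in for the paravirt op table; names are illustrative. */
struct pv_msr_ops {
	void (*write_msr)(unsigned int msr, u32 low, u32 high);
};
extern struct pv_msr_ops pv_msr_ops;

static inline void paravirt_wrmsrl_sketch(unsigned int msr, u64 val)
{
	/* Indirect call through a function pointer: a speculation target. */
	pv_msr_ops.write_msr(msr, (u32)val, (u32)(val >> 32));
}

static inline void native_wrmsrl_sketch(unsigned int msr, u64 val)
{
	/* Direct WRMSR: ECX holds the MSR index, EDX:EAX the 64-bit value. */
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
}

Replacing the former with the latter on the vmentry/vmexit paths removes both the indirect-branch exposure and the extra call overhead.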
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
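Condensing the SVM hunks above (the VMX hunks below are structured identically, with vmx->spec_ctrl in place of svm->spec_ctrl), the resulting flow around guest entry looks roughly like this sketch — a summary, not a literal excerpt:

	/* Restore the guest's SPEC_CTRL with a direct MSR write. */
	if (svm->spec_ctrl)
		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

	/* ... the asm volatile block that actually enters the guest ... */

	/*
	 * If the guest may have written SPEC_CTRL without an intercept,
	 * read the value back (again without a paravirt hook) so the
	 * next entry can restore it.
	 */
	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	/* Return SPEC_CTRL to the host value (zero) after the guest has run. */
	if (svm->spec_ctrl)
		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

	/* Eliminate branch target predictions from guest mode */
	vmexit_fill_RSB();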
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9452,7 +9453,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9588,10 +9589,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();