Commit 9753d688 authored by Aaron Lewis, committed by Paolo Bonzini

KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS on Intel

When the guest can execute the XSAVES/XRSTORS instructions, use wrmsr to
set the hardware IA32_XSS MSR to guest/host values on VM-entry/VM-exit,
rather than the MSR-load areas. By using the same approach as AMD, we
will be able to use a common implementation for both (in the next
patch).
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Change-Id: I9447d104b2615c04e39e4af0c911e1e7309bf464
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 312a1c87
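For context, a standalone model of the switching scheme this patch adopts (ordinary userspace C, not the kernel code in the diff below; wrmsr_xss(), the helper names and the sample values are illustrative stand-ins): the MSR is written directly around VM-entry/VM-exit, and only when the guest value differs from the host's, instead of going through the VMCS MSR-load areas.

/*
 * Standalone sketch of the conditional IA32_XSS switch.  hw_xss models the
 * hardware register, host_xss is the cached host value, and wrmsr_xss()
 * stands in for the real wrmsrl(MSR_IA32_XSS, ...).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_xss;     /* models the hardware IA32_XSS register */
static uint64_t host_xss;   /* cached host value, set once at init   */

static void wrmsr_xss(uint64_t val)
{
    hw_xss = val;
    printf("wrmsr IA32_XSS <- 0x%llx\n", (unsigned long long)val);
}

static void load_guest_xss(bool xsaves_enabled, uint64_t guest_xss)
{
    /* Before VM-entry: switch only if the guest uses XSAVES and differs. */
    if (xsaves_enabled && guest_xss != host_xss)
        wrmsr_xss(guest_xss);
}

static void load_host_xss(bool xsaves_enabled, uint64_t guest_xss)
{
    /* After VM-exit: restore only if we actually switched on the way in. */
    if (xsaves_enabled && guest_xss != host_xss)
        wrmsr_xss(host_xss);
}

int main(void)
{
    host_xss = hw_xss = 0x8;       /* arbitrary host value */

    load_guest_xss(true, 0x0);     /* guest differs: two MSR writes */
    load_host_xss(true, 0x0);

    load_guest_xss(true, 0x8);     /* guest matches host: no writes */
    load_host_xss(true, 0x8);
    return 0;
}

The common case where guest and host IA32_XSS agree therefore costs no MSR writes at all, which is the point of doing the switch with plain wrmsr rather than the atomic MSR-load areas.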
@@ -2081,13 +2081,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data != 0)
 			return 1;
 		vcpu->arch.ia32_xss = data;
-		if (vcpu->arch.xsaves_enabled) {
-			if (vcpu->arch.ia32_xss != host_xss)
-				add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-					vcpu->arch.ia32_xss, host_xss, false);
-			else
-				clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
-		}
 		break;
 	case MSR_IA32_RTIT_CTL:
 		if ((pt_mode != PT_MODE_HOST_GUEST) ||
@@ -6473,6 +6466,22 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
+static void vmx_load_guest_xss(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+	    vcpu->arch.xsaves_enabled &&
+	    vcpu->arch.ia32_xss != host_xss)
+		wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
+}
+
+static void vmx_load_host_xss(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+	    vcpu->arch.xsaves_enabled &&
+	    vcpu->arch.ia32_xss != host_xss)
+		wrmsrl(MSR_IA32_XSS, host_xss);
+}
+
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -6524,6 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
 	kvm_load_guest_xcr0(vcpu);
+	vmx_load_guest_xss(vcpu);
 
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
@@ -6630,6 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		__write_pkru(vmx->host_pkru);
 	}
 
+	vmx_load_host_xss(vcpu);
 	kvm_put_guest_xcr0(vcpu);
 
 	vmx->nested.nested_run_pending = 0;