Commit 36be0b9d authored by Paolo Bonzini

KVM: x86: Add nested virtualization support for MPX

This is simple to do: the "host" BNDCFGS is either 0 or the guest value.
However, both controls have to be present.  We cannot provide MPX if
we only have one of the "load BNDCFGS" or "clear BNDCFGS" controls.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 4ff41732
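The "both controls have to be present" requirement is what vmx_mpx_supported() encodes. Its body is not part of this diff; the sketch below reconstructs the helper from context (the vmcs_config fields are the ones vmx.c already uses for the other entry/exit controls):

static bool vmx_mpx_supported(void)
{
	/* MPX can only be offered when the hardware provides both the
	 * VM-exit "clear BNDCFGS" and VM-entry "load BNDCFGS" controls. */
	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
		(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}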
@@ -206,6 +206,7 @@ struct __packed vmcs12 {
 	u64 guest_pdptr1;
 	u64 guest_pdptr2;
 	u64 guest_pdptr3;
+	u64 guest_bndcfgs;
 	u64 host_ia32_pat;
 	u64 host_ia32_efer;
 	u64 host_ia32_perf_global_ctrl;
@@ -541,6 +542,7 @@ static const unsigned long shadow_read_write_fields[] = {
 	GUEST_CS_LIMIT,
 	GUEST_CS_BASE,
 	GUEST_ES_BASE,
+	GUEST_BNDCFGS,
 	CR0_GUEST_HOST_MASK,
 	CR0_READ_SHADOW,
 	CR4_READ_SHADOW,
@@ -596,6 +598,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD64(GUEST_PDPTR1, guest_pdptr1),
 	FIELD64(GUEST_PDPTR2, guest_pdptr2),
 	FIELD64(GUEST_PDPTR3, guest_pdptr3),
+	FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
 	FIELD64(HOST_IA32_PAT, host_ia32_pat),
 	FIELD64(HOST_IA32_EFER, host_ia32_efer),
 	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
@@ -736,6 +739,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
+static bool vmx_mpx_supported(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2287,6 +2291,8 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 	nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+	if (vmx_mpx_supported())
+		nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
 	/* entry controls */
 	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
...@@ -2300,6 +2306,8 @@ static __init void nested_vmx_setup_ctls_msrs(void) ...@@ -2300,6 +2306,8 @@ static __init void nested_vmx_setup_ctls_msrs(void)
VM_ENTRY_LOAD_IA32_PAT; VM_ENTRY_LOAD_IA32_PAT;
nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
VM_ENTRY_LOAD_IA32_EFER); VM_ENTRY_LOAD_IA32_EFER);
if (vmx_mpx_supported())
nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* cpu-based controls */ /* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
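With both bits set in nested_vmx_exit_ctls_high and nested_vmx_entry_ctls_high, an L1 hypervisor discovers the capability the architected way, through the VMX capability MSRs that KVM emulates. A minimal illustration (not part of the patch; rdmsr() is the kernel macro, and the high half of each capability MSR holds the allowed-1 settings):

	u32 entry_low, entry_high, exit_low, exit_high;

	rdmsr(MSR_IA32_VMX_ENTRY_CTLS, entry_low, entry_high);
	rdmsr(MSR_IA32_VMX_EXIT_CTLS, exit_low, exit_high);

	/* A set bit in the high half means the control may be enabled. */
	if ((entry_high & VM_ENTRY_LOAD_BNDCFGS) &&
	    (exit_high & VM_EXIT_CLEAR_BNDCFGS))
		; /* L1 may in turn offer MPX to its own nested guests */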
@@ -7866,6 +7874,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	set_cr4_guest_host_mask(vmx);
 
+	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
+		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vmcs_write64(TSC_OFFSET,
 			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
@@ -8351,6 +8362,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+	if (vmx_mpx_supported())
+		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
 	/* update exit information fields: */
@@ -8460,6 +8473,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
 
+	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
+	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
+		vmcs_write64(GUEST_BNDCFGS, 0);
+
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
 		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
 		vcpu->arch.pat = vmcs12->host_ia32_pat;
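Together with the prepare_vmcs02() hunk above, this implements the commit message's rule that the "host" BNDCFGS is either 0 or the guest value. Restated as a sketch (a summary of the hunk, not new logic; l1_bndcfgs is a hypothetical name for the value L1 observes after the exit):

	u64 l1_bndcfgs;

	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		l1_bndcfgs = 0;				/* architected "host" value */
	else
		l1_bndcfgs = vmcs12->guest_bndcfgs;	/* L2's last value propagates */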