提交 8d860bbe 编写于 作者: J Jim Mattson 提交者: Paolo Bonzini

kvm: vmx: Basic APIC virtualization controls have three settings

Previously, we toggled between SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE
and SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, depending on whether or
not the EXTD bit was set in MSR_IA32_APICBASE. However, if the local
APIC is disabled, we should not set either of these APIC
virtualization control bits.
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 58871649
...@@ -995,7 +995,7 @@ struct kvm_x86_ops { ...@@ -995,7 +995,7 @@ struct kvm_x86_ops {
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
......
...@@ -1990,13 +1990,11 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) ...@@ -1990,13 +1990,11 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
} }
} }
if ((old_value ^ value) & X2APIC_ENABLE) { if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
if (value & X2APIC_ENABLE) { kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
} else kvm_x86_ops->set_virtual_apic_mode(vcpu);
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
}
apic->base_address = apic->vcpu->arch.apic_base & apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE; MSR_IA32_APICBASE_BASE;
......
...@@ -5036,7 +5036,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) ...@@ -5036,7 +5036,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE); set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
} }
static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{ {
return; return;
} }
...@@ -7076,7 +7076,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { ...@@ -7076,7 +7076,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.enable_nmi_window = enable_nmi_window, .enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window, .enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept, .update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, .set_virtual_apic_mode = svm_set_virtual_apic_mode,
.get_enable_apicv = svm_get_enable_apicv, .get_enable_apicv = svm_get_enable_apicv,
.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
.load_eoi_exitmap = svm_load_eoi_exitmap, .load_eoi_exitmap = svm_load_eoi_exitmap,
......
...@@ -481,7 +481,8 @@ struct nested_vmx { ...@@ -481,7 +481,8 @@ struct nested_vmx {
bool sync_shadow_vmcs; bool sync_shadow_vmcs;
bool dirty_vmcs12; bool dirty_vmcs12;
bool change_vmcs01_virtual_x2apic_mode; bool change_vmcs01_virtual_apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */ /* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending; bool nested_run_pending;
...@@ -9281,31 +9282,43 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) ...@@ -9281,31 +9282,43 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
vmcs_write32(TPR_THRESHOLD, irr); vmcs_write32(TPR_THRESHOLD, irr);
} }
static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{ {
u32 sec_exec_control; u32 sec_exec_control;
if (!lapic_in_kernel(vcpu))
return;
/* Postpone execution until vmcs01 is the current VMCS. */ /* Postpone execution until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) { if (is_guest_mode(vcpu)) {
to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
return; return;
} }
if (!cpu_has_vmx_virtualize_x2apic_mode())
return;
if (!cpu_need_tpr_shadow(vcpu)) if (!cpu_need_tpr_shadow(vcpu))
return; return;
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
if (set) { switch (kvm_get_apic_mode(vcpu)) {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; case LAPIC_MODE_INVALID:
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; WARN_ONCE(true, "Invalid local APIC state");
} else { case LAPIC_MODE_DISABLED:
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; break;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; case LAPIC_MODE_XAPIC:
vmx_flush_tlb(vcpu, true); if (flexpriority_enabled) {
sec_exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
vmx_flush_tlb(vcpu, true);
}
break;
case LAPIC_MODE_X2APIC:
if (cpu_has_vmx_virtualize_x2apic_mode())
sec_exec_control |=
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
break;
} }
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
...@@ -12087,10 +12100,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, ...@@ -12087,10 +12100,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (kvm_has_tsc_control) if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx); decache_tsc_multiplier(vmx);
if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx->nested.change_vmcs01_virtual_x2apic_mode = false; vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu, vmx_set_virtual_apic_mode(vcpu);
vcpu->arch.apic_base & X2APIC_ENABLE);
} else if (!nested_cpu_has_ept(vmcs12) && } else if (!nested_cpu_has_ept(vmcs12) &&
nested_cpu_has2(vmcs12, nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
...@@ -12718,7 +12730,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { ...@@ -12718,7 +12730,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.enable_nmi_window = enable_nmi_window, .enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window, .enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept, .update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr, .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.get_enable_apicv = vmx_get_enable_apicv, .get_enable_apicv = vmx_get_enable_apicv,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册