提交 72b0eaa9 编写于 作者: S Sean Christopherson 提交者: Paolo Bonzini

KVM: VMX: Configure runtime hooks using vmx_x86_ops

Configure VMX's runtime hooks by modifying vmx_x86_ops directly instead
of using the global kvm_x86_ops.  This sets the stage for waiting until
after ->hardware_setup() to set kvm_x86_ops with the vendor's
implementation.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-5-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 484014fa
...@@ -6241,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void) ...@@ -6241,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void)
} }
} }
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) __init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
int (*exit_handlers[])(struct kvm_vcpu *))
{ {
int i; int i;
...@@ -6277,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) ...@@ -6277,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
kvm_x86_ops->check_nested_events = vmx_check_nested_events; ops->check_nested_events = vmx_check_nested_events;
kvm_x86_ops->get_nested_state = vmx_get_nested_state; ops->get_nested_state = vmx_get_nested_state;
kvm_x86_ops->set_nested_state = vmx_set_nested_state; ops->set_nested_state = vmx_set_nested_state;
kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages; ops->get_vmcs12_pages = nested_get_vmcs12_pages;
kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; ops->nested_enable_evmcs = nested_enable_evmcs;
kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; ops->nested_get_evmcs_version = nested_get_evmcs_version;
return 0; return 0;
} }
...@@ -19,7 +19,8 @@ enum nvmx_vmentry_status { ...@@ -19,7 +19,8 @@ enum nvmx_vmentry_status {
void vmx_leave_nested(struct kvm_vcpu *vcpu); void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps); void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void); void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); __init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void); void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
......
...@@ -7854,16 +7854,16 @@ static __init int hardware_setup(void) ...@@ -7854,16 +7854,16 @@ static __init int hardware_setup(void)
* using the APIC_ACCESS_ADDR VMCS field. * using the APIC_ACCESS_ADDR VMCS field.
*/ */
if (!flexpriority_enabled) if (!flexpriority_enabled)
kvm_x86_ops->set_apic_access_page_addr = NULL; vmx_x86_ops.set_apic_access_page_addr = NULL;
if (!cpu_has_vmx_tpr_shadow()) if (!cpu_has_vmx_tpr_shadow())
kvm_x86_ops->update_cr8_intercept = NULL; vmx_x86_ops.update_cr8_intercept = NULL;
#if IS_ENABLED(CONFIG_HYPERV) #if IS_ENABLED(CONFIG_HYPERV)
if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
&& enable_ept) { && enable_ept) {
kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb; vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
kvm_x86_ops->tlb_remote_flush_with_range = vmx_x86_ops.tlb_remote_flush_with_range =
hv_remote_flush_tlb_with_range; hv_remote_flush_tlb_with_range;
} }
#endif #endif
...@@ -7878,7 +7878,7 @@ static __init int hardware_setup(void) ...@@ -7878,7 +7878,7 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv()) { if (!cpu_has_vmx_apicv()) {
enable_apicv = 0; enable_apicv = 0;
kvm_x86_ops->sync_pir_to_irr = NULL; vmx_x86_ops.sync_pir_to_irr = NULL;
} }
if (cpu_has_vmx_tsc_scaling()) { if (cpu_has_vmx_tsc_scaling()) {
...@@ -7910,10 +7910,10 @@ static __init int hardware_setup(void) ...@@ -7910,10 +7910,10 @@ static __init int hardware_setup(void)
enable_pml = 0; enable_pml = 0;
if (!enable_pml) { if (!enable_pml) {
kvm_x86_ops->slot_enable_log_dirty = NULL; vmx_x86_ops.slot_enable_log_dirty = NULL;
kvm_x86_ops->slot_disable_log_dirty = NULL; vmx_x86_ops.slot_disable_log_dirty = NULL;
kvm_x86_ops->flush_log_dirty = NULL; vmx_x86_ops.flush_log_dirty = NULL;
kvm_x86_ops->enable_log_dirty_pt_masked = NULL; vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
} }
if (!cpu_has_vmx_preemption_timer()) if (!cpu_has_vmx_preemption_timer())
...@@ -7941,9 +7941,9 @@ static __init int hardware_setup(void) ...@@ -7941,9 +7941,9 @@ static __init int hardware_setup(void)
} }
if (!enable_preemption_timer) { if (!enable_preemption_timer) {
kvm_x86_ops->set_hv_timer = NULL; vmx_x86_ops.set_hv_timer = NULL;
kvm_x86_ops->cancel_hv_timer = NULL; vmx_x86_ops.cancel_hv_timer = NULL;
kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
} }
kvm_set_posted_intr_wakeup_handler(wakeup_handler); kvm_set_posted_intr_wakeup_handler(wakeup_handler);
...@@ -7959,7 +7959,8 @@ static __init int hardware_setup(void) ...@@ -7959,7 +7959,8 @@ static __init int hardware_setup(void)
nested_vmx_setup_ctls_msrs(&vmcs_config.nested, nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
vmx_capability.ept); vmx_capability.ept);
r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); r = nested_vmx_hardware_setup(&vmx_x86_ops,
kvm_vmx_exit_handlers);
if (r) if (r)
return r; return r;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册