Commit 987b2594 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move kvm_vcpu_init() invocation to common code

Move the kvm_vcpu_{un}init() calls to common x86 code as an intermediate
step to removing kvm_vcpu_{un}init() altogether.

Note, VMX's alloc_apic_access_page() and init_rmode_identity_map() are
per-VM allocations and are intentionally kept if vCPU creation fails.
They are freed by kvm_arch_destroy_vm().

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent d813a8ba
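For orientation, here is a minimal sketch of the creation path this patch produces (cf. the x86.c hunk at the end of the diff). Only the NULL check and the error handling appear in that hunk; the kmem_cache_zalloc() call and the exact signature are assumed from context:

/*
 * Sketch of kvm_arch_vcpu_create() after this patch: common code now
 * owns kvm_vcpu_init()/kvm_vcpu_uninit(), and the vendor hook only
 * sets up vendor (VMX/SVM) state.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int r;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);	/* assumed */
	if (!vcpu)
		return ERR_PTR(-ENOMEM);

	r = kvm_vcpu_init(vcpu, kvm, id);	/* common init, moved here */
	if (r)
		goto free_vcpu;

	r = kvm_x86_ops->vcpu_create(vcpu);	/* vendor init */
	if (r)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);			/* undo common init */
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
	return ERR_PTR(r);
}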
@@ -1050,7 +1050,7 @@ struct kvm_x86_ops {
 	void (*vm_destroy)(struct kvm *kvm);
 
 	/* Create, but do not attach this VCPU */
-	int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned id);
+	int (*vcpu_create)(struct kvm_vcpu *vcpu);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
...
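With the narrowed vcpu_create() signature, a backend that still needs the VM or the vCPU id derives both from the vcpu itself, which kvm_vcpu_init() has already populated; the VMX hunks below do exactly this with vcpu->kvm. A sketch of the new contract (example_vcpu_create is a hypothetical hook, not part of the patch):

static int example_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;		/* set by kvm_vcpu_init() */
	unsigned int id = vcpu->vcpu_id;	/* ditto */

	(void)kvm;	/* placeholder: real backends allocate state here */
	(void)id;
	return 0;
}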
@@ -2187,8 +2187,7 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
 	return ret;
 }
 
-static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
-			   unsigned int id)
+static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm;
 	struct page *page;
@@ -2200,14 +2199,10 @@ static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
 	svm = to_svm(vcpu);
 
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		return err;
-
 	err = -ENOMEM;
 	page = alloc_page(GFP_KERNEL_ACCOUNT);
 	if (!page)
-		goto uninit;
+		goto out;
 
 	msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
 	if (!msrpm_pages)
@@ -2256,8 +2251,7 @@ static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page1:
 	__free_page(page);
-uninit:
-	kvm_vcpu_uninit(vcpu);
+out:
 	return err;
 }
@@ -2284,7 +2278,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
-	kvm_vcpu_uninit(vcpu);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
...
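The net effect on the SVM side, as a skeleton (labels match the hunks above, allocation details elided): with kvm_vcpu_init() hoisted out, the first failure point has nothing to unwind, so the 'uninit' label degenerates to a plain 'out':

/* Skeleton of svm_create_vcpu() after this patch (sketch only) */
static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int err = -ENOMEM;

	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		goto out;	/* no kvm_vcpu_uninit() needed anymore */
	/* ... further per-vCPU allocations, each with its own label ... */
	return 0;

out:
	return err;
}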
@@ -6681,11 +6681,9 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
-	kvm_vcpu_uninit(vcpu);
 }
 
-static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
-			   unsigned int id)
+static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx;
 	unsigned long *msr_bitmap;
@@ -6694,10 +6692,6 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
 	vmx = to_vmx(vcpu);
 
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		return err;
-
 	err = -ENOMEM;
 
 	vmx->vpid = allocate_vpid();
@@ -6711,7 +6705,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	if (enable_pml) {
 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 		if (!vmx->pml_pg)
-			goto uninit_vcpu;
+			goto free_vpid;
 	}
 
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS);
@@ -6756,7 +6750,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
-	if (kvm_cstate_in_guest(kvm)) {
+	if (kvm_cstate_in_guest(vcpu->kvm)) {
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
@@ -6772,13 +6766,13 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	vmx_vcpu_put(vcpu);
 	put_cpu();
 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
-		err = alloc_apic_access_page(kvm);
+		err = alloc_apic_access_page(vcpu->kvm);
 		if (err)
 			goto free_vmcs;
 	}
 
 	if (enable_ept && !enable_unrestricted_guest) {
-		err = init_rmode_identity_map(kvm);
+		err = init_rmode_identity_map(vcpu->kvm);
 		if (err)
 			goto free_vmcs;
 	}
@@ -6810,8 +6804,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_pml:
 	vmx_destroy_pml_buffer(vmx);
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
+free_vpid:
 	free_vpid(vmx->vpid);
 	return err;
 }
...
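Note how the VMX error labels line up after this patch: failures in alloc_apic_access_page() and init_rmode_identity_map() jump to free_vmcs, and the unwind chain below it releases only per-vCPU state. The per-VM APIC access page and identity map (see the commit message) are deliberately not on this path; kvm_arch_destroy_vm() reclaims them. Condensed from the hunk above:

/* Unwind chain in vmx_create_vcpu() after this patch -- per-vCPU
 * resources only, in reverse order of allocation:
 */
free_vmcs:
	free_loaded_vmcs(vmx->loaded_vmcs);	/* vCPU's VMCS */
free_pml:
	vmx_destroy_pml_buffer(vmx);		/* vCPU's PML page */
free_vpid:
	free_vpid(vmx->vpid);			/* vCPU's VPID */
	return err;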
@@ -9176,6 +9176,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_free(vcpu);
 
+	kvm_vcpu_uninit(vcpu);
+
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
@@ -9197,12 +9199,20 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	if (!vcpu)
 		return ERR_PTR(-ENOMEM);
 
-	r = kvm_x86_ops->vcpu_create(kvm, vcpu, id);
-	if (r) {
-		kmem_cache_free(kvm_vcpu_cache, vcpu);
-		return ERR_PTR(r);
-	}
+	r = kvm_vcpu_init(vcpu, kvm, id);
+	if (r)
+		goto free_vcpu;
+
+	r = kvm_x86_ops->vcpu_create(vcpu);
+	if (r)
+		goto uninit_vcpu;
 
 	return vcpu;
+
+uninit_vcpu:
+	kvm_vcpu_uninit(vcpu);
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
+	return ERR_PTR(r);
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
...
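The destruction side mirrors creation in reverse, as the kvm_arch_vcpu_free() hunk above shows: create runs kvm_vcpu_init() then the vendor vcpu_create(), destroy runs the vendor vcpu_free() then kvm_vcpu_uninit(). A condensed sketch of that ordering invariant (the real function also frees FPU and cpumask state, elided here):

/* Create:  kvm_vcpu_init() -> kvm_x86_ops->vcpu_create()
 * Destroy: kvm_x86_ops->vcpu_free() -> kvm_vcpu_uninit()
 * Condensed from the hunks above; sketch, not the full function.
 */
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_free(vcpu);	/* vendor (VMX/SVM) teardown first */
	kvm_vcpu_uninit(vcpu);		/* then common teardown, moved here */
}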