Commit 3067714c authored by Avi Kivity

KVM: Move page fault processing to common code

The code that dispatches the page fault and emulates if we failed to map
is duplicated across vmx and svm.  Merge it to simplify further bugfixing.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent c7e75a3d
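For orientation (not part of the commit), here is a minimal C sketch of how a vendor-specific page-fault exit handler is expected to use the common helper after this change. The return-value convention is taken from the kvm_mmu_page_fault() body in the diff below; the handler name is hypothetical:

/*
 * Illustrative sketch only, assuming the convention visible in the diff:
 *   r < 0  -> internal error, propagate to the caller
 *   r == 0 -> the access became an MMIO exit; return to userspace
 *   r == 1 -> the fault was handled in the kernel; resume the guest
 * example_pf_exit_handler() is a made-up name, not part of this commit.
 */
static int example_pf_exit_handler(struct kvm_vcpu *vcpu, gva_t cr2,
                                   u32 error_code)
{
        /* Locking and the emulation fallback now live in common code. */
        return kvm_mmu_page_fault(vcpu, cr2, error_code);
}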
@@ -1347,6 +1347,42 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
         }
 }
 
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+{
+        int r;
+        enum emulation_result er;
+
+        mutex_lock(&vcpu->kvm->lock);
+        r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+        if (r < 0)
+                goto out;
+
+        if (!r) {
+                r = 1;
+                goto out;
+        }
+
+        er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
+        mutex_unlock(&vcpu->kvm->lock);
+
+        switch (er) {
+        case EMULATE_DONE:
+                return 1;
+        case EMULATE_DO_MMIO:
+                ++vcpu->stat.mmio_exits;
+                return 0;
+        case EMULATE_FAIL:
+                kvm_report_emulation_failure(vcpu, "pagetable");
+                return 1;
+        default:
+                BUG();
+        }
+out:
+        mutex_unlock(&vcpu->kvm->lock);
+        return r;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu_page *page;
@@ -933,45 +933,14 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         struct kvm *kvm = svm->vcpu.kvm;
         u64 fault_address;
         u32 error_code;
-        enum emulation_result er;
-        int r;
 
         if (!irqchip_in_kernel(kvm) &&
             is_external_interrupt(exit_int_info))
                 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
 
-        mutex_lock(&kvm->lock);
-
         fault_address = svm->vmcb->control.exit_info_2;
         error_code = svm->vmcb->control.exit_info_1;
-        r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
-        if (r < 0) {
-                mutex_unlock(&kvm->lock);
-                return r;
-        }
-        if (!r) {
-                mutex_unlock(&kvm->lock);
-                return 1;
-        }
-        er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
-                                 error_code, 0);
-        mutex_unlock(&kvm->lock);
-
-        switch (er) {
-        case EMULATE_DONE:
-                return 1;
-        case EMULATE_DO_MMIO:
-                ++svm->vcpu.stat.mmio_exits;
-                return 0;
-        case EMULATE_FAIL:
-                kvm_report_emulation_failure(&svm->vcpu, "pagetable");
-                break;
-        default:
-                BUG();
-        }
-        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-        return 0;
+        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
 static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
@@ -1796,7 +1796,6 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         unsigned long cr2, rip;
         u32 vect_info;
         enum emulation_result er;
-        int r;
 
         vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1834,33 +1833,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
         if (is_page_fault(intr_info)) {
                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
-
-                mutex_lock(&vcpu->kvm->lock);
-                r = kvm_mmu_page_fault(vcpu, cr2, error_code);
-                if (r < 0) {
-                        mutex_unlock(&vcpu->kvm->lock);
-                        return r;
-                }
-                if (!r) {
-                        mutex_unlock(&vcpu->kvm->lock);
-                        return 1;
-                }
-
-                er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
-                mutex_unlock(&vcpu->kvm->lock);
-
-                switch (er) {
-                case EMULATE_DONE:
-                        return 1;
-                case EMULATE_DO_MMIO:
-                        ++vcpu->stat.mmio_exits;
-                        return 0;
-                case EMULATE_FAIL:
-                        kvm_report_emulation_failure(vcpu, "pagetable");
-                        break;
-                default:
-                        BUG();
-                }
+                return kvm_mmu_page_fault(vcpu, cr2, error_code);
         }
 
         if (vcpu->rmode.active &&
@@ -85,11 +85,7 @@ struct kvm_vcpu {
         struct x86_emulate_ctxt emulate_ctxt;
 };
 
-static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                                     u32 error_code)
-{
-        return vcpu->mmu.page_fault(vcpu, gva, error_code);
-}
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {