Commit 6389ee94 authored by Avi Kivity

KVM: Pull extra page fault information into struct x86_exception

Currently page fault cr2 and nesting information are carried outside
the fault data structure.  Instead they are placed in the vcpu struct,
which results in confusion as global variables are manipulated instead
of passing parameters.

Fix this issue by adding address and nested fields to struct x86_exception,
so this struct can carry all information associated with a fault.
Signed-off-by: Avi Kivity <avi@redhat.com>
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Tested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 8c28d031
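For orientation before the per-file hunks, here is a minimal, standalone C sketch of the idea (not kernel code; walk_fault() and the printf-based injector are illustrative stand-ins). The fault address and nesting flag now travel in struct x86_exception alongside the vector and error code, so whoever injects the fault needs only the struct, not global per-vcpu state.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR	14
#define PFERR_RSVD_MASK	(1U << 3)

/* Mirrors the patched struct: the faulting address and nesting flag now
 * travel with the exception instead of living in vcpu->arch.fault. */
struct x86_exception {
	uint8_t vector;
	bool error_code_valid;
	uint16_t error_code;
	bool nested_page_fault;
	uint64_t address;	/* cr2 or nested page fault gpa */
};

/* Illustrative stand-in for the MMU walker: it records everything about
 * the fault in the exception struct it was handed. */
static void walk_fault(struct x86_exception *fault, uint64_t addr,
		       uint16_t error_code, bool nested)
{
	fault->vector = PF_VECTOR;
	fault->error_code_valid = true;
	fault->error_code = error_code;
	fault->address = addr;
	fault->nested_page_fault = nested;
}

/* Illustrative injector: it consumes only what the struct carries, so no
 * global per-vcpu fault state is needed to propagate the fault. */
static void inject_page_fault(const struct x86_exception *fault)
{
	printf("inject #PF: address=%#llx error_code=%#x nested=%d\n",
	       (unsigned long long)fault->address, fault->error_code,
	       (int)fault->nested_page_fault);
}

int main(void)
{
	struct x86_exception fault;

	walk_fault(&fault, 0xdeadb000ULL, PFERR_RSVD_MASK, false);
	inject_page_fault(&fault);
	return 0;
}
```

In the actual patch, FNAME(walk_addr_generic) fills walker->fault the same way, and the mmu, svm, and async-pf injectors read error_code and address straight from the struct passed to them.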
@@ -19,6 +19,8 @@ struct x86_exception {
 	u8 vector;
 	bool error_code_valid;
 	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
 };

 /*
......
@@ -242,7 +242,8 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
@@ -318,16 +319,6 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;

-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64 address;
-		unsigned error_code;
-		bool nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -686,11 +677,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

 int kvm_pic_set_irq(void *opaque, int irq, int level);
......
@@ -2736,9 +2736,10 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr3;
 }

-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }

 static void paging_free(struct kvm_vcpu *vcpu)
......
@@ -279,8 +279,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;

-	vcpu->arch.fault.address = addr;
-	vcpu->arch.fault.error_code = walker->fault.error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

 	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
@@ -568,7 +568,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
+		inject_page_fault(vcpu, &walker.fault);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
......
@@ -1647,14 +1647,15 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 	force_new_asid(vcpu);
 }

-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;

 	nested_svm_vmexit(svm);
 }
......
@@ -334,23 +334,19 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);

-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
-
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }

-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }

 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3610,8 +3606,6 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;

 	return t_gpa;
 }
@@ -4259,7 +4253,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
+		kvm_propagate_fault(vcpu, &ctxt->exception);
 	else if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
 				      ctxt->exception.error_code);
@@ -6264,6 +6258,8 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
@@ -6272,15 +6268,20 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 		     kvm_x86_ops->get_cpl(vcpu) == 0))
 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 }

 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (is_error_page(work->page))
 		work->arch.token = ~0; /* broadcast wakeup */
@@ -6289,9 +6290,12 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
 }
......