Commit 8df25a32 authored by Joerg Roedel, committed by Avi Kivity

KVM: MMU: Track page fault data in struct vcpu

This patch introduces a struct with two new fields in
vcpu_arch for x86:

	* fault.address
	* fault.error_code

This will be used to correctly propagate page faults back
into the guest when we could have either an ordinary page
fault or a nested page fault. In the case of a nested page
fault the fault-address is different from the original
address that should be walked. So we need to keep track
of the real fault address.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 3241f22d
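The idea, in short: the page-table walker is the one place that knows both the real fault address and the error code, so it stores them in the new vcpu->arch.fault struct, and the injection path later reads that struct instead of taking the values as parameters. Below is a minimal, self-contained sketch of that flow; the fault field names mirror the patch, while the surrounding types and the helper walker_record_fault() are simplified stand-ins, not the literal kernel code.

/* Minimal sketch only; simplified types, not the literal kernel code. */
#include <stdio.h>

typedef unsigned long long u64;

struct vcpu {
        struct {
                u64      address;     /* fault address the guest should see */
                unsigned error_code;  /* #PF error code for the guest */
        } fault;
        u64 cr2;                      /* what eventually lands in the guest's CR2 */
};

/* The walker records the fault data as soon as the walk fails. */
static void walker_record_fault(struct vcpu *vcpu, u64 addr, unsigned error_code)
{
        vcpu->fault.address    = addr;
        vcpu->fault.error_code = error_code;
}

/* Injection takes no extra parameters; it consumes the recorded state. */
static void inject_page_fault(struct vcpu *vcpu)
{
        vcpu->cr2 = vcpu->fault.address;
        printf("inject #PF: cr2=%#llx error_code=%#x\n",
               vcpu->cr2, vcpu->fault.error_code);
}

int main(void)
{
        struct vcpu vcpu = { 0 };

        walker_record_fault(&vcpu, 0xdeadb000ULL, 0x2);  /* e.g. a write fault */
        inject_page_fault(&vcpu);
        return 0;
}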
@@ -229,7 +229,6 @@ struct x86_emulate_ctxt {
         int exception; /* exception that happens during emulation or -1 */
         u32 error_code; /* error code for exception */
         bool error_code_valid;
-        unsigned long cr2; /* faulted address in case of #PF */
 
         /* decode cache */
         struct decode_cache decode;
...
@@ -239,9 +239,7 @@ struct kvm_mmu {
         void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
         unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
         int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
-                                  unsigned long addr,
-                                  u32 error_code);
+        void (*inject_page_fault)(struct kvm_vcpu *vcpu);
         void (*free)(struct kvm_vcpu *vcpu);
         gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                             u32 *error);
@@ -288,6 +286,16 @@ struct kvm_vcpu_arch {
         bool tpr_access_reporting;
 
         struct kvm_mmu mmu;
+
+        /*
+         * This struct is filled with the necessary information to propagate a
+         * page fault into the guest
+         */
+        struct {
+                u64      address;
+                unsigned error_code;
+        } fault;
+
         /* only needed in kvm_pv_mmu_op() path, but it's hot so
          * put it here to avoid allocation */
         struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -624,8 +632,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
-                           u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
...
@@ -487,11 +487,9 @@ static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
         emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
-                       int err)
+static void emulate_pf(struct x86_emulate_ctxt *ctxt)
 {
-        ctxt->cr2 = addr;
-        emulate_exception(ctxt, PF_VECTOR, err, true);
+        emulate_exception(ctxt, PF_VECTOR, 0, true);
 }
 
 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
@@ -834,7 +832,7 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
                 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
                                         ctxt->vcpu);
                 if (rc == X86EMUL_PROPAGATE_FAULT)
-                        emulate_pf(ctxt, addr, err);
+                        emulate_pf(ctxt);
                 if (rc != X86EMUL_CONTINUE)
                         return rc;
                 mc->end += n;
@@ -921,7 +919,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
         addr = dt.address + index * 8;
         ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
         if (ret == X86EMUL_PROPAGATE_FAULT)
-                emulate_pf(ctxt, addr, err);
+                emulate_pf(ctxt);
 
         return ret;
 }
@@ -947,7 +945,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
         addr = dt.address + index * 8;
         ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
         if (ret == X86EMUL_PROPAGATE_FAULT)
-                emulate_pf(ctxt, addr, err);
+                emulate_pf(ctxt);
 
         return ret;
 }
@@ -1117,7 +1115,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
                                         &err,
                                         ctxt->vcpu);
                 if (rc == X86EMUL_PROPAGATE_FAULT)
-                        emulate_pf(ctxt, c->dst.addr.mem, err);
+                        emulate_pf(ctxt);
                 if (rc != X86EMUL_CONTINUE)
                         return rc;
                 break;
@@ -1939,7 +1937,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, old_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -1949,7 +1947,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, old_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -1957,7 +1955,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, new_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -1970,7 +1968,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                                      ctxt->vcpu, &err);
                 if (ret == X86EMUL_PROPAGATE_FAULT) {
                         /* FIXME: need to provide precise fault address */
-                        emulate_pf(ctxt, new_tss_base, err);
+                        emulate_pf(ctxt);
                         return ret;
                 }
         }
@@ -2081,7 +2079,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, old_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -2091,7 +2089,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, old_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -2099,7 +2097,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                             &err);
         if (ret == X86EMUL_PROPAGATE_FAULT) {
                 /* FIXME: need to provide precise fault address */
-                emulate_pf(ctxt, new_tss_base, err);
+                emulate_pf(ctxt);
                 return ret;
         }
 
@@ -2112,7 +2110,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                                      ctxt->vcpu, &err);
                 if (ret == X86EMUL_PROPAGATE_FAULT) {
                         /* FIXME: need to provide precise fault address */
-                        emulate_pf(ctxt, new_tss_base, err);
+                        emulate_pf(ctxt);
                         return ret;
                 }
         }
...
@@ -2566,11 +2566,9 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
         return vcpu->arch.cr3;
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu,
-                              u64 addr,
-                              u32 err_code)
+static void inject_page_fault(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.mmu.inject_page_fault(vcpu, addr, err_code);
+        vcpu->arch.mmu.inject_page_fault(vcpu);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
...
@@ -258,6 +258,10 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                 walker->error_code |= PFERR_FETCH_MASK;
         if (rsvd_fault)
                 walker->error_code |= PFERR_RSVD_MASK;
+
+        vcpu->arch.fault.address    = addr;
+        vcpu->arch.fault.error_code = walker->error_code;
+
         trace_kvm_mmu_walker_error(walker->error_code);
         return 0;
 }
@@ -521,7 +525,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
          */
         if (!r) {
                 pgprintk("%s: guest page fault\n", __func__);
-                inject_page_fault(vcpu, addr, walker.error_code);
+                inject_page_fault(vcpu);
                 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
                 return 0;
         }
...
@@ -329,11 +329,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
-                           u32 error_code)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
 {
+        unsigned error_code = vcpu->arch.fault.error_code;
+
         ++vcpu->stat.pf_guest;
-        vcpu->arch.cr2 = addr;
+        vcpu->arch.cr2 = vcpu->arch.fault.address;
         kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
@@ -4080,7 +4081,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
         if (ctxt->exception == PF_VECTOR)
-                kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
+                kvm_inject_page_fault(vcpu);
         else if (ctxt->error_code_valid)
                 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
         else
...