Commit 09072daf authored by Avi Kivity

KVM: Unify kvm_mmu_pre_write() and kvm_mmu_post_write()

Instead of calling two functions and repeating expensive checks, call one
function and provide it with before/after information.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 62135845
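For orientation, here is a minimal sketch of the call-site pattern before and after this change, using only names that appear in the diff below; it illustrates the idea and is not a compilable excerpt:

	/* Before: the guest write was bracketed by two hooks, each handed only
	 * (gpa, bytes), so any work done in both would repeat the gfn lookup
	 * and shadow-page scan; kvm_mmu_post_write() was in fact an empty stub. */
	kvm_mmu_pre_write(vcpu, gpa, bytes);
	memcpy(virt + offset_in_page(gpa), val, bytes);
	kvm_mmu_post_write(vcpu, gpa, bytes);

	/* After: one hook, called while the mapping is live and before the
	 * memcpy, so it sees the old bytes (virt + offset) and the new bytes
	 * (val) together in a single call. */
	kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
	memcpy(virt + offset_in_page(gpa), val, bytes);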
@@ -525,8 +525,8 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
 unsigned long segment_base(u16 selector);
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
...
@@ -1071,18 +1071,18 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	struct page *page;
 	void *virt;
+	unsigned offset = offset_in_page(gpa);
 
 	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
 		return 0;
 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 	if (!page)
 		return 0;
-	kvm_mmu_pre_write(vcpu, gpa, bytes);
 	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
 	virt = kmap_atomic(page, KM_USER0);
+	kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
 	memcpy(virt + offset_in_page(gpa), val, bytes);
 	kunmap_atomic(virt, KM_USER0);
-	kvm_mmu_post_write(vcpu, gpa, bytes);
 	return 1;
 }
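Note the ordering at the updated call site above: kvm_mmu_pte_write() runs after kmap_atomic() but before the memcpy(), so virt + offset still points at the page's current (pre-write) contents, which serve as the old argument, while val supplies the new bytes. That ordering is what lets a single call carry the before/after information that previously required two hooks.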
...
@@ -1118,7 +1118,7 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *page,
 				  u64 *spte)
 {
@@ -1137,7 +1137,8 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
 	*spte = 0;
 }
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *page;
@@ -1206,16 +1207,12 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 		spte = __va(page->page_hpa);
 		spte += page_offset / sizeof(*spte);
 		while (npte--) {
-			mmu_pre_write_zap_pte(vcpu, page, spte);
+			mmu_pte_write_zap_pte(vcpu, page, spte);
 			++spte;
 		}
 	}
 }
 
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
...