提交 9b7a0325 编写于 作者: A Avi Kivity 提交者: Linus Torvalds

[PATCH] KVM: MMU: Zap shadow page table entries on writes to guest page tables

Iterate over all shadow pages which correspond to the given guest page table
and remove the mappings.

A subsequent page fault will reestablish the new mapping.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 da4a00f0
......@@ -958,7 +958,43 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
/*
 * kvm_mmu_pre_write - zap the shadow pte mirroring a guest page-table
 * entry that is about to be written at @gpa.
 *
 * For every shadow page that shadows the written gfn, the spte covering
 * the written offset is cleared (after detaching its rmap entry or child
 * parent-pte link), so a subsequent page fault rebuilds the mapping from
 * the new guest pte.
 *
 * NOTE(review): @bytes is only logged; a write spanning more than one
 * guest pte appears to zap just the first covered spte — confirm against
 * callers of kvm_mmu_pre_write.
 */
void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *page;
struct kvm_mmu_page *child;
struct hlist_node *node;
struct hlist_head *bucket;
unsigned index;
u64 *spte;
u64 pte;
unsigned offset = offset_in_page(gpa);
unsigned page_offset;
int level;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
/* Find the hash bucket of shadow pages for this guest frame. */
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
bucket = &vcpu->kvm->mmu_page_hash[index];
hlist_for_each_entry(page, node, bucket, hash_link) {
/*
 * Skip pages shadowing a different gfn, and "metaphysical"
 * pages (presumably shadow pages with no real guest page
 * table behind them — verify against mmu.c).
 */
if (page->gfn != gfn || page->role.metaphysical)
continue;
page_offset = offset;
level = page->role.level;
if (page->role.glevels == PT32_ROOT_LEVEL) {
/*
 * 32-bit guest ptes are half the size of sptes:
 * scale the byte offset 4->8 bytes, then mask back
 * into one page in case it overflowed.
 */
page_offset <<= 1; /* 32->64 */
page_offset &= ~PAGE_MASK;
}
/* Shadow table is tracked by host-physical address. */
spte = __va(page->page_hpa);
spte += page_offset / sizeof(*spte);
pte = *spte;
if (is_present_pte(pte)) {
if (level == PT_PAGE_TABLE_LEVEL)
/* Leaf spte: drop its reverse-map entry. */
rmap_remove(vcpu->kvm, spte);
else {
/* Non-leaf: detach the child table's parent-pte link. */
child = page_header(pte & PT64_BASE_ADDR_MASK);
mmu_page_remove_parent_pte(child, spte);
}
}
/* Clear unconditionally; the next fault re-establishes it. */
*spte = 0;
}
}
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册