Commit 5b5c6a5a authored by Avi Kivity

KVM: MMU: Simplify kvm_mmu_zap_page()

The twisty maze of conditionals can be reduced.

[joerg: fix tlb flushing]
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 31aa2b44
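
For reference, this is how kvm_mmu_zap_page() reads once the patch is applied, reconstructed from the third hunk below; the comments are added here for explanation and are not part of the patch. The remote TLB flush and the unaccount_shadowed() call now happen once, up front, instead of being repeated inside the root_count branches and in kvm_mmu_page_unlink_children().

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ++kvm->stat.mmu_shadow_zapped;
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
        /* Flush once here; the flushes in kvm_mmu_page_unlink_children() are dropped. */
        kvm_flush_remote_tlbs(kvm);
        /* Unaccount once, instead of separately in each branch below. */
        if (!sp->role.invalid && !sp->role.metaphysical)
                unaccount_shadowed(kvm, sp->gfn);
        if (!sp->root_count) {
                /* Not referenced by any root: free the shadow page immediately. */
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
                /* Still referenced as a root: mark invalid and reload remote MMUs. */
                sp->role.invalid = 1;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
}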
@@ -955,7 +955,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                         rmap_remove(kvm, &pt[i]);
                         pt[i] = shadow_trap_nonpresent_pte;
                 }
-                kvm_flush_remote_tlbs(kvm);
                 return;
         }
 
@@ -974,7 +973,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                 }
                 pt[i] = shadow_trap_nonpresent_pte;
         }
-        kvm_flush_remote_tlbs(kvm);
 }
 
 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -1016,18 +1014,16 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
         ++kvm->stat.mmu_shadow_zapped;
         kvm_mmu_page_unlink_children(kvm, sp);
         kvm_mmu_unlink_parents(kvm, sp);
+        kvm_flush_remote_tlbs(kvm);
+        if (!sp->role.invalid && !sp->role.metaphysical)
+                unaccount_shadowed(kvm, sp->gfn);
         if (!sp->root_count) {
-                if (!sp->role.metaphysical && !sp->role.invalid)
-                        unaccount_shadowed(kvm, sp->gfn);
                 hlist_del(&sp->hash_link);
                 kvm_mmu_free_page(kvm, sp);
         } else {
-                int invalid = sp->role.invalid;
-                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                 sp->role.invalid = 1;
+                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                 kvm_reload_remote_mmus(kvm);
-                if (!sp->role.metaphysical && !invalid)
-                        unaccount_shadowed(kvm, sp->gfn);
         }
         kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1842,7 +1838,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         index = kvm_page_table_hashfn(gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-                if (sp->gfn != gfn || sp->role.metaphysical)
+                if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
                         continue;
                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
...