Commit 210f4942, authored by Sean Christopherson, committed by Paolo Bonzini

Revert "KVM: MMU: collapse TLB flushes when zap all pages"

Unwinding the optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one part of
reverting all of the patches from the series that introduced the mechanism[1].

This reverts commit f34d251d.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 52d5dedc
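For context, the hunks below restore the pre-series shape of kvm_zap_obsolete_pages(): instead of relying on a bare cond_resched_lock() in the loop condition and leaving the TLB flush to the single kvm_mmu_commit_zap_page() after the loop, the walk once again commits (and thereby flushes) each batch before voluntarily dropping mmu_lock. A rough sketch of the restored loop, paraphrased rather than copied verbatim from the kernel source (local names such as node and ret are illustrative), looks like this:

struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
int batch = 0, ret;

restart:
        list_for_each_entry_safe_reverse(sp, node,
              &kvm->arch.active_mmu_pages, link) {
                if (!is_obsolete_sp(kvm, sp))
                        break;
                if (sp->role.invalid)
                        continue;

                /*
                 * Restored behaviour: commit (and TLB-flush) the batch
                 * before rescheduling, rather than collapsing everything
                 * into the single commit after the loop.
                 */
                if (batch >= BATCH_ZAP_PAGES &&
                      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
                        batch = 0;
                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
                        cond_resched_lock(&kvm->mmu_lock);
                        goto restart;
                }

                ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                batch += ret;
                if (ret)
                        goto restart;
        }

        /*
         * The final commit flushes the TLB before the zapped page tables
         * are freed, since lockless walkers may still be using them.
         */
        kvm_mmu_commit_zap_page(kvm, &invalid_list);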
@@ -2211,14 +2211,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                     struct list_head *invalid_list);
 
-/*
- * NOTE: we should pay more attention on the zapped-obsolete page
- * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk
- * since it has been deleted from active_mmu_pages but still can be found
- * at hast list.
- *
- * for_each_valid_sp() has skipped that kind of pages.
- */
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
         hlist_for_each_entry(_sp,                                      \
               &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -5881,13 +5873,11 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
                 if (sp->role.invalid)
                         continue;
 
-                /*
-                 * Need not flush tlb since we only zap the sp with invalid
-                 * generation number.
-                 */
                 if (batch >= BATCH_ZAP_PAGES &&
-                      cond_resched_lock(&kvm->mmu_lock)) {
+                      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
                         batch = 0;
+                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                        cond_resched_lock(&kvm->mmu_lock);
                         goto restart;
                 }
 
@@ -5898,10 +5888,6 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
                         goto restart;
         }
 
-        /*
-         * Should flush tlb before free page tables since lockless-walking
-         * may use the pages.
-         */
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
@@ -5920,17 +5906,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
         trace_kvm_mmu_invalidate_zap_all_pages(kvm);
         kvm->arch.mmu_valid_gen++;
 
-        /*
-         * Notify all vcpus to reload its shadow page table
-         * and flush TLB. Then all vcpus will switch to new
-         * shadow page table with the new mmu_valid_gen.
-         *
-         * Note: we should do this under the protection of
-         * mmu-lock, otherwise, vcpu would purge shadow page
-         * but miss tlb flush.
-         */
-        kvm_reload_remote_mmus(kvm);
-
         kvm_zap_obsolete_pages(kvm);
         spin_unlock(&kvm->mmu_lock);
 }