Commit 5da59607 authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: MMU: Introduce a helper function for FIFO zapping

Make the code for zapping the oldest mmu page, placed at the tail of the
active list, a separate function.
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 945315b9
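
Below is a minimal, self-contained userspace sketch of the FIFO-zap pattern this commit factors out: pages are inserted at the head of an active list, so the entry at the tail (list.prev) is the oldest and is the one evicted first. The list helpers mirror the shape of <linux/list.h> but are simplified stand-ins, and struct mmu_page and zap_oldest_page() are hypothetical illustrations, not kernel code.

/* fifo_zap_sketch.c - illustrative only; build with: cc fifo_zap_sketch.c */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Hypothetical stand-in for struct kvm_mmu_page: an id plus the list link. */
struct mmu_page {
	int id;
	struct list_head link;
};

static struct list_head active_pages = LIST_HEAD_INIT(active_pages);

/*
 * Same shape as prepare_zap_oldest_mmu_page(): the tail of the list
 * (active_pages.prev) is the oldest entry, and the bool return tells
 * the caller whether there was anything left to zap.
 */
static bool zap_oldest_page(void)
{
	struct mmu_page *sp;

	if (list_empty(&active_pages))
		return false;

	sp = list_entry(active_pages.prev, struct mmu_page, link);
	list_del(&sp->link);
	printf("zapped page %d (oldest)\n", sp->id);
	return true;
}

int main(void)
{
	struct mmu_page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	/* list_add() inserts at the head, so page 0 ends up at the tail. */
	for (int i = 0; i < 3; i++)
		list_add(&pages[i].link, &active_pages);

	while (zap_oldest_page())	/* zaps pages 0, 1, 2 in FIFO order */
		;
	return 0;
}

The bool return is the point of the refactoring: each caller can simply loop until either its budget is met or the list is drained, without open-coding the tail lookup.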
@@ -2110,6 +2110,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	}
 }
 
+static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
+					struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		return false;
+
+	sp = list_entry(kvm->arch.active_mmu_pages.prev,
+			struct kvm_mmu_page, link);
+	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+	return true;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -2117,23 +2132,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
-	/*
-	 * If we set the number of mmu pages to be smaller be than the
-	 * number of actived pages , we must to free some mmu pages before we
-	 * change the value
-	 */
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-			!list_empty(&kvm->arch.active_mmu_pages)) {
-			struct kvm_mmu_page *page;
+		/* Need to free some mmu pages to achieve the goal. */
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
+			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+				break;
 
-			page = container_of(kvm->arch.active_mmu_pages.prev,
-					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
@@ -4007,13 +4014,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
-	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-		struct kvm_mmu_page *sp;
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+			break;
 
-		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-				  struct kvm_mmu_page, link);
-		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -4182,19 +4186,6 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-						struct list_head *invalid_list)
-{
-	struct kvm_mmu_page *page;
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		return;
-
-	page = container_of(kvm->arch.active_mmu_pages.prev,
-			    struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -4229,7 +4220,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
-		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
+		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 		spin_unlock(&kvm->mmu_lock);