提交 505aef8f 编写于 作者: X Xiao Guangrong 提交者: Avi Kivity

KVM: MMU: cleanup FNAME(invlpg)

Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg), and also remove the
duplicated code shared between FNAME(invlpg) and FNAME(sync_page).
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
上级 d01f8d5e
...@@ -1809,7 +1809,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -1809,7 +1809,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
} }
} }
static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *spte) u64 *spte)
{ {
u64 pte; u64 pte;
...@@ -1817,17 +1817,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, ...@@ -1817,17 +1817,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
pte = *spte; pte = *spte;
if (is_shadow_present_pte(pte)) { if (is_shadow_present_pte(pte)) {
if (is_last_spte(pte, sp->role.level)) if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte); drop_spte(kvm, spte);
else { if (is_large_pte(pte))
--kvm->stat.lpages;
} else {
child = page_header(pte & PT64_BASE_ADDR_MASK); child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte); drop_parent_pte(child, spte);
} }
} else if (is_mmio_spte(pte)) return true;
}
if (is_mmio_spte(pte))
mmu_spte_clear_no_track(spte); mmu_spte_clear_no_track(spte);
if (is_large_pte(pte)) return false;
--kvm->stat.lpages;
} }
static void kvm_mmu_page_unlink_children(struct kvm *kvm, static void kvm_mmu_page_unlink_children(struct kvm *kvm,
......
...@@ -656,6 +656,18 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, ...@@ -656,6 +656,18 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return 0; return 0;
} }
/*
 * Return the guest physical address of the guest page table that this
 * level-1 shadow page (sp) shadows.
 *
 * For 32-bit guest paging (PTTYPE == 32) a single guest page contains
 * more than one shadow table's worth of entries, so sp->role.quadrant
 * selects which portion of the guest page this sp corresponds to;
 * the quadrant is scaled by PT64_LEVEL_BITS entries and then by the
 * guest PTE size.  For 64-bit paging the offset stays 0 and the gpa is
 * simply gfn_to_gpa(sp->gfn).
 *
 * NOTE(review): only valid for last-level (level 1) shadow pages,
 * enforced by the WARN_ON below.
 */
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
/* Entry offset within the guest page; nonzero only for 32-bit guests. */
int offset = 0;
WARN_ON(sp->role.level != 1);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
/* Base gpa of the guest page plus the byte offset of this quadrant. */
return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{ {
struct kvm_shadow_walk_iterator iterator; struct kvm_shadow_walk_iterator iterator;
...@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) ...@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t pte_gpa = -1; gpa_t pte_gpa = -1;
int level; int level;
u64 *sptep; u64 *sptep;
int need_flush = 0;
vcpu_clear_mmio_info(vcpu, gva); vcpu_clear_mmio_info(vcpu, gva);
...@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) ...@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
sp = page_header(__pa(sptep)); sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) { if (is_last_spte(*sptep, level)) {
int offset, shift;
if (!sp->unsync) if (!sp->unsync)
break; break;
shift = PAGE_SHIFT - pte_gpa = FNAME(get_level1_sp_gpa)(sp);
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
offset = sp->role.quadrant << shift;
pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) { if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
if (is_large_pte(*sptep)) kvm_flush_remote_tlbs(vcpu->kvm);
--vcpu->kvm->stat.lpages;
drop_spte(vcpu->kvm, sptep);
need_flush = 1;
} else if (is_mmio_spte(*sptep))
mmu_spte_clear_no_track(sptep);
break;
} }
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break; break;
} }
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
atomic_inc(&vcpu->kvm->arch.invlpg_counter); atomic_inc(&vcpu->kvm->arch.invlpg_counter);
spin_unlock(&vcpu->kvm->mmu_lock); spin_unlock(&vcpu->kvm->mmu_lock);
...@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, ...@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
*/ */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ {
int i, offset, nr_present; int i, nr_present = 0;
bool host_writable; bool host_writable;
gpa_t first_pte_gpa; gpa_t first_pte_gpa;
offset = nr_present = 0;
/* direct kvm_mmu_page can not be unsync. */ /* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct); BUG_ON(sp->role.direct);
if (PTTYPE == 32) first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
offset = sp->role.quadrant << PT64_LEVEL_BITS;
first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) { for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access; unsigned pte_access;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册