Commit c8cfbb55 authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: MMU: Use ptep_user for cmpxchg_gpte()

The address of the gpte was already calculated and stored in ptep_user
before entering cmpxchg_gpte().

This patch makes cmpxchg_gpte() use that address, to make it clear that we
are operating on the same address during walk_addr_generic().

Note that the unlikely annotations are used to show that the conditions
are something unusual rather than for performance.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 85722cda
@@ -79,21 +79,19 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 }
 
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       gfn_t table_gfn, unsigned index,
+			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
 {
+	int npages;
 	pt_element_t ret;
 	pt_element_t *table;
 	struct page *page;
-	gpa_t gpa;
 
-	gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
-				 PFERR_USER_MASK|PFERR_WRITE_MASK);
-	if (gpa == UNMAPPED_GVA)
+	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+	/* Check if the user is doing something meaningless. */
+	if (unlikely(npages != 1))
 		return -EFAULT;
 
-	page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
-
 	table = kmap_atomic(page, KM_USER0);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
 	kunmap_atomic(table, KM_USER0);
@@ -220,9 +218,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
-						  index, pte, pte|PT_ACCESSED_MASK);
-			if (ret < 0) {
+			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+						  pte, pte|PT_ACCESSED_MASK);
+			if (unlikely(ret < 0)) {
 				present = false;
 				break;
 			} else if (ret)
@@ -279,9 +277,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		int ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
-					  pte|PT_DIRTY_MASK);
-		if (ret < 0) {
+		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+					  pte, pte|PT_DIRTY_MASK);
+		if (unlikely(ret < 0)) {
 			present = false;
 			goto error;
 		} else if (ret)
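
For readability, here is a sketch of how FNAME(cmpxchg_gpte)() reads with this patch applied, reconstructed from the first hunk above. The tail of the function lies outside the hunk shown here and is marked as such in the comments; it is unchanged by this patch.

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	/*
	 * Pin the single user page that backs the gpte; no gfn translation
	 * is needed because walk_addr_generic() already computed ptep_user.
	 */
	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	/*
	 * Outside the shown hunk and unchanged by this patch: drop the pin
	 * and report whether the old value matched.
	 */
	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

The design point is that the walker already holds the host virtual address of the gpte in ptep_user, so translating table_gfn again inside cmpxchg_gpte() was redundant; calling get_user_pages_fast() on that address pins exactly the page the walker was reading.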