Commit 4c2155ce authored by Marcelo Tosatti, committed by Avi Kivity

KVM: switch to get_user_pages_fast

Convert gfn_to_pfn to use get_user_pages_fast, which can do lockless
pagetable lookups on x86. Kernel compilation on a 4-way guest is 3.7%
faster on VMX.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 777b3f49
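
The heart of the conversion is in gfn_to_pfn() (the last hunk of the diff): the get_user_pages() call, which requires the caller to hold current->mm->mmap_sem, is replaced with get_user_pages_fast(), which walks the page tables locklessly on x86 and falls back to taking mmap_sem internally only when the fast path cannot resolve the page. Every down_read()/up_read() pair that callers used to wrap around gfn_to_page()/gfn_to_pfn() can therefore be dropped. A minimal sketch of the conversion pattern, assuming the 2.6.27-era get_user_pages_fast(start, nr_pages, write, pages) signature, with error handling omitted:

        /* Before: caller must hold current->mm->mmap_sem for read. */
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, NULL);
        up_read(&current->mm->mmap_sem);

        /* After: no mmap_sem on the common path; the fast GUP walk is
         * lockless and retries under the lock internally if it fails. */
        npages = get_user_pages_fast(addr, 1, 1, page);
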
@@ -147,9 +147,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
         stlbe = &vcpu->arch.shadow_tlb[victim];
 
         /* Get reference to new page. */
-        down_read(&current->mm->mmap_sem);
         new_page = gfn_to_page(vcpu->kvm, gfn);
-        up_read(&current->mm->mmap_sem);
         if (is_error_page(new_page)) {
                 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
                 kvm_release_page_clean(new_page);
......
@@ -405,16 +405,19 @@ static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
 {
         struct vm_area_struct *vma;
         unsigned long addr;
+        int ret = 0;
 
         addr = gfn_to_hva(kvm, gfn);
         if (kvm_is_error_hva(addr))
-                return 0;
+                return ret;
 
+        down_read(&current->mm->mmap_sem);
         vma = find_vma(current->mm, addr);
         if (vma && is_vm_hugetlb_page(vma))
-                return 1;
+                ret = 1;
+        up_read(&current->mm->mmap_sem);
 
-        return 0;
+        return ret;
 }
 
 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
@@ -1140,9 +1143,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
         if (gpa == UNMAPPED_GVA)
                 return NULL;
 
-        down_read(&current->mm->mmap_sem);
         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-        up_read(&current->mm->mmap_sem);
 
         return page;
 }
@@ -1330,16 +1331,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         pfn_t pfn;
         unsigned long mmu_seq;
 
-        down_read(&current->mm->mmap_sem);
         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                 largepage = 1;
         }
 
         mmu_seq = vcpu->kvm->mmu_notifier_seq;
-        /* implicit mb(), we'll read before PT lock is unlocked */
+        smp_rmb();
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
-        up_read(&current->mm->mmap_sem);
 
         /* mmio */
         if (is_error_pfn(pfn)) {
@@ -1488,15 +1487,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
         if (r)
                 return r;
 
-        down_read(&current->mm->mmap_sem);
         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                 largepage = 1;
         }
         mmu_seq = vcpu->kvm->mmu_notifier_seq;
-        /* implicit mb(), we'll read before PT lock is unlocked */
+        smp_rmb();
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
-        up_read(&current->mm->mmap_sem);
         if (is_error_pfn(pfn)) {
                 kvm_release_pfn_clean(pfn);
                 return 1;
@@ -1809,15 +1806,13 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 return;
         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
-        down_read(&current->mm->mmap_sem);
         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                 vcpu->arch.update_pte.largepage = 1;
         }
         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
-        /* implicit mb(), we'll read before PT lock is unlocked */
+        smp_rmb();
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
-        up_read(&current->mm->mmap_sem);
 
         if (is_error_pfn(pfn)) {
                 kvm_release_pfn_clean(pfn);
......
@@ -102,14 +102,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
         pt_element_t *table;
         struct page *page;
 
-        down_read(&current->mm->mmap_sem);
         page = gfn_to_page(kvm, table_gfn);
-        up_read(&current->mm->mmap_sem);
 
         table = kmap_atomic(page, KM_USER0);
         ret = CMPXCHG(&table[index], orig_pte, new_pte);
         kunmap_atomic(table, KM_USER0);
 
         kvm_release_page_dirty(page);
@@ -418,7 +414,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                 return 0;
         }
 
-        down_read(&current->mm->mmap_sem);
         if (walker.level == PT_DIRECTORY_LEVEL) {
                 gfn_t large_gfn;
                 large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
@@ -428,9 +423,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                 }
         }
         mmu_seq = vcpu->kvm->mmu_notifier_seq;
-        /* implicit mb(), we'll read before PT lock is unlocked */
+        smp_rmb();
         pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
-        up_read(&current->mm->mmap_sem);
 
         /* mmio */
         if (is_error_pfn(pfn)) {
......
@@ -2010,9 +2010,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
         if (r)
                 goto out;
 
-        down_read(&current->mm->mmap_sem);
         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
-        up_read(&current->mm->mmap_sem);
 out:
         up_write(&kvm->slots_lock);
         return r;
@@ -2034,10 +2032,8 @@ static int alloc_identity_pagetable(struct kvm *kvm)
         if (r)
                 goto out;
 
-        down_read(&current->mm->mmap_sem);
         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
                         VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
-        up_read(&current->mm->mmap_sem);
 out:
         up_write(&kvm->slots_lock);
         return r;
......
@@ -946,10 +946,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                 /* ...but clean it before doing the actual write */
                 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-                down_read(&current->mm->mmap_sem);
                 vcpu->arch.time_page =
                                 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-                up_read(&current->mm->mmap_sem);
 
                 if (is_error_page(vcpu->arch.time_page)) {
                         kvm_release_page_clean(vcpu->arch.time_page);
@@ -2322,9 +2320,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                 val = *(u64 *)new;
 
-                down_read(&current->mm->mmap_sem);
                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-                up_read(&current->mm->mmap_sem);
 
                 kaddr = kmap_atomic(page, KM_USER0);
                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
@@ -3089,9 +3085,7 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
         if (!apic || !apic->vapic_addr)
                 return;
 
-        down_read(&current->mm->mmap_sem);
         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-        up_read(&current->mm->mmap_sem);
 
         vcpu->arch.apic->vapic_page = page;
 }
......
@@ -723,9 +723,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-/*
- * Requires current->mm->mmap_sem to be held
- */
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
         struct page *page[1];
@@ -741,20 +738,23 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
                 return page_to_pfn(bad_page);
         }
 
-        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
-                                NULL);
+        npages = get_user_pages_fast(addr, 1, 1, page);
         if (unlikely(npages != 1)) {
                 struct vm_area_struct *vma;
 
+                down_read(&current->mm->mmap_sem);
                 vma = find_vma(current->mm, addr);
                 if (vma == NULL || addr < vma->vm_start ||
                     !(vma->vm_flags & VM_PFNMAP)) {
+                        up_read(&current->mm->mmap_sem);
                         get_page(bad_page);
                         return page_to_pfn(bad_page);
                 }
                 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+                up_read(&current->mm->mmap_sem);
                 BUG_ON(!is_mmio_pfn(pfn));
         } else
                 pfn = page_to_pfn(page[0]);
......
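
One more detail in the fault-path hunks above: the old comment "implicit mb(), we'll read before PT lock is unlocked" relied on the barrier implied by holding mmap_sem across the mmu_notifier_seq read and the gfn_to_pfn() call. With the semaphore gone, that ordering is made explicit with smp_rmb(). A sketch of the retry pattern those call sites follow, simplified, with the mmu_notifier_retry() call shown as an assumption about the surrounding (unchanged) code of that era:

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();                              /* order the seq read before the pfn lookup */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))  /* an invalidation raced with the lookup */
                goto out_unlock;                /* release the pfn and let the fault retry */
        /* ... install the mapping under mmu_lock ... */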