Commit 93e60249 authored by Paul Mackerras, committed by Avi Kivity

KVM: PPC: Add an interface for pinning guest pages in Book3s HV guests

This adds two new functions, kvmppc_pin_guest_page() and
kvmppc_unpin_guest_page(), and uses them to pin the guest pages where
the guest has registered areas of memory for the hypervisor to update
(i.e. the per-cpu virtual processor areas, SLB shadow buffers and
dispatch trace logs) and then unpin them when they are no longer
required.

Although it is not strictly necessary to pin the pages at this point,
since all guest pages are already pinned, later commits in this series
will mean that guest pages aren't all pinned.
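
As a quick orientation for readers of this page (not part of the patch): callers of the new interface are expected to follow the pin -> use -> unpin pattern that do_h_register_vpa() adopts in the hunks below. The sketch that follows is illustrative only; the function name example_use_guest_area() is made up, while kvmppc_pin_guest_page(), kvmppc_unpin_guest_page(), H_PARAMETER and H_SUCCESS come from the patch itself.

/*
 * Usage sketch (illustrative only, not part of the patch).  It follows the
 * pin -> use -> unpin pattern that do_h_register_vpa() adopts below.
 */
static unsigned long example_use_guest_area(struct kvm *kvm, unsigned long gpa)
{
        unsigned long nb;       /* bytes usable from the returned address */
        void *va;

        /* Take a reference on the backing page and get a host virtual
         * address for gpa; NULL means gpa is not covered by a valid
         * memslot. */
        va = kvmppc_pin_guest_page(kvm, gpa, &nb);
        if (va == NULL)
                return H_PARAMETER;

        /* ... read or update the guest-visible structure through va,
         * staying within nb bytes ... */

        /* Drop the page reference taken by kvmppc_pin_guest_page(). */
        kvmppc_unpin_guest_page(kvm, va);
        return H_SUCCESS;
}

Note that kvmppc_unpin_guest_page() needs only the host virtual address returned by the pin call; it recovers the backing struct page (its compound head, for large pages) itself.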
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent b2b2f165
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -138,6 +138,9 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+                        unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -184,6 +184,44 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
         return -ENOENT;
 }
 
+void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
+                            unsigned long *nb_ret)
+{
+        struct kvm_memory_slot *memslot;
+        unsigned long gfn = gpa >> PAGE_SHIFT;
+        struct page *page;
+        unsigned long offset;
+        unsigned long pfn, pa;
+        unsigned long *physp;
+
+        memslot = gfn_to_memslot(kvm, gfn);
+        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+                return NULL;
+        physp = kvm->arch.slot_phys[memslot->id];
+        if (!physp)
+                return NULL;
+        physp += (gfn - memslot->base_gfn) >>
+                 (kvm->arch.ram_porder - PAGE_SHIFT);
+        pa = *physp;
+        if (!pa)
+                return NULL;
+        pfn = pa >> PAGE_SHIFT;
+        page = pfn_to_page(pfn);
+        get_page(page);
+        offset = gpa & (kvm->arch.ram_psize - 1);
+        if (nb_ret)
+                *nb_ret = kvm->arch.ram_psize - offset;
+        return page_address(page) + offset;
+}
+
+void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
+{
+        struct page *page = virt_to_page(va);
+
+        page = compound_head(page);
+        put_page(page);
+}
+
 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
         struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
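
As an aside (not part of the patch), the offset/nb_ret computation in kvmppc_pin_guest_page() can be made concrete with an assumed backing page size; the 16 MB value below only illustrates kvm->arch.ram_psize and is not stated by this commit.

/*
 * Worked example with assumed values (16 MB guest RAM pages, so
 * kvm->arch.ram_psize == 0x1000000); not taken from the patch:
 *
 *   gpa    = 0x01234560
 *   offset = gpa & (ram_psize - 1) = 0x00234560
 *   nb_ret = ram_psize - offset    = 0x00dcbaa0
 *
 * The caller may therefore access up to nb_ret bytes starting at the
 * returned host virtual address without crossing the backing page.
 */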
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -139,12 +139,10 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                                        unsigned long vcpuid, unsigned long vpa)
 {
         struct kvm *kvm = vcpu->kvm;
-        unsigned long gfn, pg_index, ra, len;
-        unsigned long pg_offset;
+        unsigned long len, nb;
         void *va;
         struct kvm_vcpu *tvcpu;
-        struct kvm_memory_slot *memslot;
-        unsigned long *physp;
+        int err = H_PARAMETER;
 
         tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
         if (!tvcpu)
@@ -157,51 +155,41 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
         if (flags < 4) {
                 if (vpa & 0x7f)
                         return H_PARAMETER;
+                if (flags >= 2 && !tvcpu->arch.vpa)
+                        return H_RESOURCE;
                 /* registering new area; convert logical addr to real */
-                gfn = vpa >> PAGE_SHIFT;
-                memslot = gfn_to_memslot(kvm, gfn);
-                if (!memslot || !(memslot->flags & KVM_MEMSLOT_INVALID))
-                        return H_PARAMETER;
-                physp = kvm->arch.slot_phys[memslot->id];
-                if (!physp)
-                        return H_PARAMETER;
-                pg_index = (gfn - memslot->base_gfn) >>
-                        (kvm->arch.ram_porder - PAGE_SHIFT);
-                pg_offset = vpa & (kvm->arch.ram_psize - 1);
-                ra = physp[pg_index];
-                if (!ra)
+                va = kvmppc_pin_guest_page(kvm, vpa, &nb);
+                if (va == NULL)
                         return H_PARAMETER;
-                ra = (ra & PAGE_MASK) | pg_offset;
-                va = __va(ra);
                 if (flags <= 1)
                         len = *(unsigned short *)(va + 4);
                 else
                         len = *(unsigned int *)(va + 4);
-                if (pg_offset + len > kvm->arch.ram_psize)
-                        return H_PARAMETER;
+                if (len > nb)
+                        goto out_unpin;
                 switch (flags) {
                 case 1:         /* register VPA */
                         if (len < 640)
-                                return H_PARAMETER;
+                                goto out_unpin;
+                        if (tvcpu->arch.vpa)
+                                kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
                         tvcpu->arch.vpa = va;
                         init_vpa(vcpu, va);
                         break;
                 case 2:         /* register DTL */
                         if (len < 48)
-                                return H_PARAMETER;
-                        if (!tvcpu->arch.vpa)
-                                return H_RESOURCE;
+                                goto out_unpin;
                         len -= len % 48;
+                        if (tvcpu->arch.dtl)
+                                kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
                         tvcpu->arch.dtl = va;
                         tvcpu->arch.dtl_end = va + len;
                         break;
                 case 3:         /* register SLB shadow buffer */
-                        if (len < 8)
-                                return H_PARAMETER;
-                        if (!tvcpu->arch.vpa)
-                                return H_RESOURCE;
-                        tvcpu->arch.slb_shadow = va;
-                        len = (len - 16) / 16;
+                        if (len < 16)
+                                goto out_unpin;
+                        if (tvcpu->arch.slb_shadow)
+                                kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
                         tvcpu->arch.slb_shadow = va;
                         break;
                 }
@@ -210,17 +198,30 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                 case 5:         /* unregister VPA */
                         if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
                                 return H_RESOURCE;
+                        if (!tvcpu->arch.vpa)
+                                break;
+                        kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
                         tvcpu->arch.vpa = NULL;
                         break;
                 case 6:         /* unregister DTL */
+                        if (!tvcpu->arch.dtl)
+                                break;
+                        kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
                         tvcpu->arch.dtl = NULL;
                         break;
                 case 7:         /* unregister SLB shadow buffer */
+                        if (!tvcpu->arch.slb_shadow)
+                                break;
+                        kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
                         tvcpu->arch.slb_shadow = NULL;
                         break;
                 }
         }
         return H_SUCCESS;
+
+ out_unpin:
+        kvmppc_unpin_guest_page(kvm, va);
+        return err;
 }
 
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -470,6 +471,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
+        if (vcpu->arch.dtl)
+                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
+        if (vcpu->arch.slb_shadow)
+                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
+        if (vcpu->arch.vpa)
+                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
         kvm_vcpu_uninit(vcpu);
         kfree(vcpu);
 }