Commit f31e65e1 authored by Benjamin Herrenschmidt, committed by Alexander Graf

kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM

There is nothing in the code for emulating TCE tables in the kernel
that prevents it from working on "PR" KVM... other than ifdef's and
location of the code.

This moves the bulk of the code to a new file called book3s_64_vio.c.

This speeds things up a bit on my G5.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[agraf: fix for hv kvm, 32bit, whitespace]
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent: 4444aa5f
@@ -237,7 +237,6 @@ struct kvm_arch {
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
-	struct list_head spapr_tce_tables;
 	spinlock_t slot_phys_lock;
 	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
 	int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -245,6 +244,9 @@ struct kvm_arch {
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
...
@@ -126,6 +126,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 				struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
...
@@ -54,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
+	book3s_64_vio_hv.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
@@ -78,6 +79,7 @@ kvm-book3s_64-module-objs := \
 	powerpc.o \
 	emulate.o \
 	book3s.o \
+	book3s_64_vio.o \
 	$(kvm-book3s_64-objs-y)
 
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
...
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap     = kvm_spapr_tce_mmap,
	.release  = kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}
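
To illustrate the sizing logic in kvmppc_stt_npages() above, the short stand-alone sketch below walks through the same arithmetic for one hypothetical window size. The SPAPR_TCE_SHIFT and PAGE_SIZE values, the local ALIGN() macro and the 256 MiB window are assumptions made only for this example, not values taken from the patch.

/*
 * Minimal user-space sketch of the table-sizing math used by
 * kvmppc_stt_npages(): one u64 TCE per (1 << SPAPR_TCE_SHIFT) bytes of
 * DMA window, rounded up to whole backing pages.
 */
#include <stdio.h>

#define SPAPR_TCE_SHIFT	12		/* assumed: one TCE maps 4 KiB */
#define PAGE_SIZE	4096UL		/* assumed host page size */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static long stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT) * sizeof(unsigned long long),
		     PAGE_SIZE) / PAGE_SIZE;
}

int main(void)
{
	/* 256 MiB window -> 65536 TCEs -> 512 KiB of table -> 128 pages */
	unsigned long window = 256UL << 20;

	printf("%ld backing pages\n", stt_npages(window));
	return 0;
}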
@@ -38,6 +38,9 @@
 #define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
 
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		      unsigned long ioba, unsigned long tce)
 {
...
@@ -1093,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return r;
 }
 
-static long kvmppc_stt_npages(unsigned long window_size)
-{
-	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
-		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
-	struct kvm *kvm = stt->kvm;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	list_del(&stt->list);
-	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
-		__free_page(stt->pages[i]);
-	kfree(stt);
-	mutex_unlock(&kvm->lock);
-
-	kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
-	struct page *page;
-
-	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
-		return VM_FAULT_SIGBUS;
-
-	page = stt->pages[vmf->pgoff];
-	get_page(page);
-	vmf->page = page;
-	return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
-	.fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	vma->vm_ops = &kvm_spapr_tce_vm_ops;
-	return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
-	struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
-	release_spapr_tce_table(stt);
-	return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
-	.mmap     = kvm_spapr_tce_mmap,
-	.release  = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-				   struct kvm_create_spapr_tce *args)
-{
-	struct kvmppc_spapr_tce_table *stt = NULL;
-	long npages;
-	int ret = -ENOMEM;
-	int i;
-
-	/* Check this LIOBN hasn't been previously allocated */
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == args->liobn)
-			return -EBUSY;
-	}
-
-	npages = kvmppc_stt_npages(args->window_size);
-
-	stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
-		      GFP_KERNEL);
-	if (!stt)
-		goto fail;
-
-	stt->liobn = args->liobn;
-	stt->window_size = args->window_size;
-	stt->kvm = kvm;
-
-	for (i = 0; i < npages; i++) {
-		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-		if (!stt->pages[i])
-			goto fail;
-	}
-
-	kvm_get_kvm(kvm);
-
-	mutex_lock(&kvm->lock);
-	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-	mutex_unlock(&kvm->lock);
-
-	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-				stt, O_RDWR);
-
-fail:
-	if (stt) {
-		for (i = 0; i < npages; i++)
-			if (stt->pages[i])
-				__free_page(stt->pages[i]);
-
-		kfree(stt);
-	}
-	return ret;
-}
-
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
...
@@ -1171,11 +1171,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
 	return 0;
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
 }
 
 static int kvmppc_book3s_init(void)
...
@@ -15,6 +15,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/anon_inodes.h>
+
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -211,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+	long rc;
+
+	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+	if (rc == H_TOO_HARD)
+		return EMULATE_FAIL;
+	kvmppc_set_gpr(vcpu, 3, rc);
+	return EMULATE_DONE;
+}
+
 int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 {
 	switch (cmd) {
@@ -222,6 +238,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 		return kvmppc_h_pr_protect(vcpu);
 	case H_BULK_REMOVE:
 		return kvmppc_h_pr_bulk_remove(vcpu);
+	case H_PUT_TCE:
+		return kvmppc_h_pr_put_tce(vcpu);
 	case H_CEDE:
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
...
@@ -244,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 #endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 		r = 1;
 		break;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_CAP_PPC_SMT:
 		r = threads_per_core;
 		break;
@@ -773,7 +775,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CREATE_SPAPR_TCE: {
 		struct kvm_create_spapr_tce create_tce;
 		struct kvm *kvm = filp->private_data;
@@ -784,7 +786,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
 		goto out;
 	}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_ALLOCATE_RMA: {
 		struct kvm *kvm = filp->private_data;
 		struct kvm_allocate_rma rma;
...
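
With the ifdefs above relaxed from CONFIG_KVM_BOOK3S_64_HV to CONFIG_PPC_BOOK3S_64, userspace running on a PR guest can reach the same TCE path as on HV. The sketch below shows roughly how a caller might probe KVM_CAP_SPAPR_TCE and create an in-kernel TCE table; the LIOBN, the window size and the minimal error handling are assumptions made for illustration, and the code only builds where linux/kvm.h provides struct kvm_create_spapr_tce (PowerPC).

/*
 * Hedged user-space sketch: probe the capability this patch now reports on
 * both HV and PR Book3S, then ask the kernel to create a TCE table.  The
 * ioctl and struct field names match the diff above; the concrete values
 * are made-up examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* With this patch the capability is also reported on PR KVM. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SPAPR_TCE) <= 0) {
		fprintf(stderr, "KVM_CAP_SPAPR_TCE not available\n");
		return 1;
	}

	struct kvm_create_spapr_tce args = {
		.liobn = 0x80000000,		/* example LIOBN (assumption) */
		.window_size = 256 << 20,	/* example 256 MiB DMA window */
	};

	/* Returns an fd backed by the TCE table pages; the fd is mmap-able
	 * via the kvm_spapr_tce_fops added in book3s_64_vio.c above. */
	int tce_fd = ioctl(vm, KVM_CREATE_SPAPR_TCE, &args);
	if (tce_fd < 0)
		perror("KVM_CREATE_SPAPR_TCE");

	return 0;
}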