Commit d2a1b483 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Add HPT preallocator

We're currently allocating 16MB of linear memory on demand when creating
a guest. That does work sometimes, but finding 16MB of linear memory
available in the system at runtime is definitely not a given.

So let's add another command line option, similar to the RMA preallocator,
that we can use to keep a pool of page tables around. Now, when a guest
gets created, it has a pretty low chance of hitting an OOM.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent b7f5d011
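
For reference, a sketch of how the new option would be used (the value below is illustrative, not part of the patch): booting the host with

    kvm_hpt_count=4

on the kernel command line makes kvm_linear_init() reserve four HPT-sized buffers at boot via kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT). With the 16MB HPT mentioned above, that sets aside roughly 64MB up front; each guest created later takes one buffer from this pool through kvm_alloc_hpt() and only falls back to __get_free_pages() when the pool is exhausted.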
@@ -235,6 +235,7 @@ struct kvm_arch {
 	int slot_npages[KVM_MEM_SLOTS_NUM];
 	unsigned short last_vcpu[NR_CPUS];
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+	struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 };
......
@@ -130,6 +130,8 @@ extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 				struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvmppc_linear_info *ri);
+extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
+extern void kvm_release_hpt(struct kvmppc_linear_info *li);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
......
@@ -44,10 +44,20 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 	unsigned long hpt;
 	unsigned long lpid;
 	struct revmap_entry *rev;
+	struct kvmppc_linear_info *li;
 
 	/* Allocate guest's hashed page table */
-	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
-			       HPT_ORDER - PAGE_SHIFT);
+	li = kvm_alloc_hpt();
+	if (li) {
+		/* using preallocated memory */
+		hpt = (ulong)li->base_virt;
+		kvm->arch.hpt_li = li;
+	} else {
+		/* using dynamic memory */
+		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+				       __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+	}
+
 	if (!hpt) {
 		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
 		return -ENOMEM;
@@ -88,7 +98,10 @@ void kvmppc_free_hpt(struct kvm *kvm)
 {
 	clear_bit(kvm->arch.lpid, lpid_inuse);
 	vfree(kvm->arch.revmap);
-	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+	if (kvm->arch.hpt_li)
+		kvm_release_hpt(kvm->arch.hpt_li);
+	else
+		free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
......
@@ -19,6 +19,7 @@
 #include <asm/kvm_book3s.h>
 
 #define KVM_LINEAR_RMA		0
+#define KVM_LINEAR_HPT		1
 
 static void __init kvm_linear_init_one(ulong size, int count, int type);
 static struct kvmppc_linear_info *kvm_alloc_linear(int type);
@@ -97,6 +98,39 @@ void kvm_release_rma(struct kvmppc_linear_info *ri)
 }
 EXPORT_SYMBOL_GPL(kvm_release_rma);
 
+/*************** HPT *************/
+
+/*
+ * This maintains a list of big linear HPT tables that contain the GVA->HPA
+ * memory mappings. If we don't reserve those early on, we might not be able
+ * to get a big (usually 16MB) linear memory region from the kernel anymore.
+ */
+static unsigned long kvm_hpt_count;
+
+static int __init early_parse_hpt_count(char *p)
+{
+	if (!p)
+		return 1;
+
+	kvm_hpt_count = simple_strtoul(p, NULL, 0);
+
+	return 0;
+}
+early_param("kvm_hpt_count", early_parse_hpt_count);
+
+struct kvmppc_linear_info *kvm_alloc_hpt(void)
+{
+	return kvm_alloc_linear(KVM_LINEAR_HPT);
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
+
+void kvm_release_hpt(struct kvmppc_linear_info *li)
+{
+	kvm_release_linear(li);
+}
+EXPORT_SYMBOL_GPL(kvm_release_hpt);
+
 /*************** generic *************/
 
 static LIST_HEAD(free_linears);
@@ -114,7 +148,7 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 	if (!count)
 		return;
 
-	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "";
+	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
 
 	npages = size >> PAGE_SHIFT;
 	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
@@ -173,6 +207,9 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri)
  */
 void __init kvm_linear_init(void)
 {
+	/* HPT */
+	kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
+
 	/* RMA */
 	/* Only do this on PPC970 in HV mode */
 	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
......
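
To make the allocation strategy easier to see outside the kernel context, here is a small self-contained sketch of the same preallocate-then-fall-back pattern. It is a plain user-space illustration under assumed names (pool_init, pool_get, pool_put, POOL_SLOTS are made up for the example); the actual patch uses bootmem reservations, a shared free list in book3s_hv_builtin.c and __get_free_pages(), not malloc().

/*
 * Illustrative sketch (not kernel code): reserve a fixed pool of large
 * buffers early, hand them out on demand, and fall back to on-demand
 * allocation only when the pool is empty.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE   (16 * 1024 * 1024)	/* one 16 MB "HPT" per slot */
#define POOL_SLOTS 4			/* cf. kvm_hpt_count= on the cmdline */

static void *pool[POOL_SLOTS];
static int pool_free;

static void pool_init(void)
{
	/* Done once, early, while large contiguous allocations still succeed. */
	for (pool_free = 0; pool_free < POOL_SLOTS; pool_free++)
		pool[pool_free] = malloc(BUF_SIZE);
}

static void *pool_get(int *from_pool)
{
	if (pool_free > 0) {
		*from_pool = 1;
		return pool[--pool_free];	/* preallocated buffer */
	}
	*from_pool = 0;
	return malloc(BUF_SIZE);		/* dynamic fallback, may fail */
}

static void pool_put(void *buf, int from_pool)
{
	if (from_pool)
		pool[pool_free++] = buf;	/* give it back to the pool */
	else
		free(buf);
}

int main(void)
{
	int from_pool;
	void *hpt;

	pool_init();
	hpt = pool_get(&from_pool);		/* like kvm_alloc_hpt() with fallback */
	printf("buffer %p, %d slot(s) left in pool\n", hpt, pool_free);
	pool_put(hpt, from_pool);		/* like kvmppc_free_hpt() */
	return 0;
}

The property this mirrors from the patch is that the caller remembers where the buffer came from (kvm->arch.hpt_li in the kernel, from_pool here), so teardown returns pooled buffers to the pool instead of freeing them.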