Commit 774c47f1 authored by Avi Kivity, committed by Linus Torvalds

[PATCH] KVM: cpu hotplug support

On hotplug, we execute the hardware extension enable sequence.  On unplug, we
decache any vcpus that last ran on the exiting cpu, and execute the hardware
extension disable sequence.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 8d0be2b3
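For readers unfamiliar with the hotplug interface this patch builds on, below is a minimal, self-contained sketch (not part of the patch) of the same CPU hotplug notifier pattern, written against the kernel API of that era: register_cpu_notifier() and the five-argument smp_call_function_single() (both have since been replaced, notably by the cpuhp state machine). The demo_* names are hypothetical stand-ins for kvm_arch_ops->hardware_enable/hardware_disable; the notifier mirrors the patch's handling of CPU_UP_PREPARE, CPU_DEAD and CPU_UP_CANCELED.

/*
 * Sketch only: a standalone module using the pre-cpuhp hotplug notifier API.
 * demo_hw_enable()/demo_hw_disable() are hypothetical stand-ins for the
 * per-cpu hardware enable/disable callbacks described in the changelog.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/smp.h>

static void demo_hw_enable(void *unused)
{
        printk(KERN_INFO "demo: enable on cpu %d\n", raw_smp_processor_id());
}

static void demo_hw_disable(void *unused)
{
        printk(KERN_INFO "demo: disable on cpu %d\n", raw_smp_processor_id());
}

static int demo_cpu_hotplug(struct notifier_block *nb, unsigned long val,
                            void *v)
{
        int cpu = (long)v;

        switch (val) {
        case CPU_UP_PREPARE:
                /* Run the enable sequence for the cpu that is coming up. */
                smp_call_function_single(cpu, demo_hw_enable, NULL, 0, 1);
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                /* Mirror the patch: run the disable sequence once the cpu
                 * has gone away or its bring-up was cancelled. */
                smp_call_function_single(cpu, demo_hw_disable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block demo_cpu_notifier = {
        .notifier_call = demo_cpu_hotplug,
};

static int __init demo_init(void)
{
        return register_cpu_notifier(&demo_cpu_notifier);
}

static void __exit demo_exit(void)
{
        unregister_cpu_notifier(&demo_cpu_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With such a notifier loaded, taking a cpu offline and back online through /sys/devices/system/cpu/cpuN/online triggers the disable and enable callbacks, which is how the KVM hooks added below get exercised.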
@@ -341,6 +341,7 @@ struct kvm_arch_ops {
         struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
         void (*vcpu_put)(struct kvm_vcpu *vcpu);
+        void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
         int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                                struct kvm_debug_guest *dbg);
......
@@ -34,6 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <asm/desc.h>
+#include <linux/cpu.h>
 
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
@@ -2039,6 +2040,64 @@ static struct notifier_block kvm_reboot_notifier = {
         .priority = 0,
 };
 
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+static void decache_vcpus_on_cpu(int cpu)
+{
+        struct kvm *vm;
+        struct kvm_vcpu *vcpu;
+        int i;
+
+        spin_lock(&kvm_lock);
+        list_for_each_entry(vm, &vm_list, vm_list)
+                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+                        vcpu = &vm->vcpus[i];
+                        /*
+                         * If the vcpu is locked, then it is running on some
+                         * other cpu and therefore it is not cached on the
+                         * cpu in question.
+                         *
+                         * If it's not locked, check the last cpu it executed
+                         * on.
+                         */
+                        if (mutex_trylock(&vcpu->mutex)) {
+                                if (vcpu->cpu == cpu) {
+                                        kvm_arch_ops->vcpu_decache(vcpu);
+                                        vcpu->cpu = -1;
+                                }
+                                mutex_unlock(&vcpu->mutex);
+                        }
+                }
+        spin_unlock(&kvm_lock);
+}
+
+static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
+                           void *v)
+{
+        int cpu = (long)v;
+
+        switch (val) {
+        case CPU_DEAD:
+        case CPU_UP_CANCELED:
+                decache_vcpus_on_cpu(cpu);
+                smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
+                                         NULL, 0, 1);
+                break;
+        case CPU_UP_PREPARE:
+                smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
+                                         NULL, 0, 1);
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_cpu_notifier = {
+        .notifier_call = kvm_cpu_hotplug,
+        .priority = 20, /* must be > scheduler priority */
+};
+
 static __init void kvm_init_debug(void)
 {
         struct kvm_stats_debugfs_item *p;
@@ -2085,6 +2144,9 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
                 return r;
 
         on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+        r = register_cpu_notifier(&kvm_cpu_notifier);
+        if (r)
+                goto out_free_1;
         register_reboot_notifier(&kvm_reboot_notifier);
 
         kvm_chardev_ops.owner = module;
@@ -2099,6 +2161,8 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 
 out_free:
         unregister_reboot_notifier(&kvm_reboot_notifier);
+        unregister_cpu_notifier(&kvm_cpu_notifier);
+out_free_1:
         on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
         kvm_arch_ops->hardware_unsetup();
         return r;
......
@@ -609,6 +609,10 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
         put_cpu();
 }
 
+static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
         vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
@@ -1677,6 +1681,7 @@ static struct kvm_arch_ops svm_arch_ops = {
         .vcpu_load = svm_vcpu_load,
         .vcpu_put = svm_vcpu_put,
+        .vcpu_decache = svm_vcpu_decache,
 
         .set_guest_debug = svm_guest_debug,
         .get_msr = svm_get_msr,
......
@@ -250,6 +250,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
         put_cpu();
 }
 
+static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+        vcpu_clear(vcpu);
+}
+
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
         return vmcs_readl(GUEST_RFLAGS);
@@ -509,7 +514,7 @@ static __init int vmx_disabled_by_bios(void)
         return (msr & 5) == 1; /* locked but not enabled */
 }
 
-static __init void hardware_enable(void *garbage)
+static void hardware_enable(void *garbage)
 {
         int cpu = raw_smp_processor_id();
         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2023,6 +2028,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
         .vcpu_load = vmx_vcpu_load,
         .vcpu_put = vmx_vcpu_put,
+        .vcpu_decache = vmx_vcpu_decache,
 
         .set_guest_debug = set_guest_debug,
         .get_msr = vmx_get_msr,
......