diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 5f43697aed3018f5d821a10d641642de88473c32..9cf1c4b1f92fe2e2b5dd835863724da7bd2ce2b9 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -465,7 +465,6 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
 
-	int		online_vcpus;
 	int		is_sn2;
 
 	struct kvm_ioapic *vioapic;
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index f922bbba37974657060182137487d49a65cf0285..cbadd8a65233e0e27207f6541ec7a620dbd603c2 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -25,6 +25,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_IRQCHIP
+	select KVM_APIC_ARCHITECTURE
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 4082665ace0a4183857377f3ab2430f066fed12c..d1f7bcda2c7fe08a7d9a7c894cff1ac9ad9661ea 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -338,7 +338,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 	union ia64_lid lid;
 	int i;
 
-	for (i = 0; i < kvm->arch.online_vcpus; i++) {
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
 		if (kvm->vcpus[i]) {
 			lid.val = VCPU_LID(kvm->vcpus[i]);
 			if (lid.id == id && lid.eid == eid)
@@ -412,7 +412,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
-	for (i = 0; i < kvm->arch.online_vcpus; i++) {
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
 		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
 				KVM_MP_STATE_UNINITIALIZED ||
 				vcpu == kvm->vcpus[i])
@@ -852,8 +852,6 @@ struct kvm *kvm_arch_create_vm(void)
 
 	kvm_init_vm(kvm);
 
-	kvm->arch.online_vcpus = 0;
-
 	return kvm;
 }
 
@@ -1356,8 +1354,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto fail;
 	}
 
-	kvm->arch.online_vcpus++;
-
 	return vcpu;
 fail:
 	return ERR_PTR(r);
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index 61a3320b62c11076d98a7f630442bcb2a1908e5e..dce75b70cdd5d9abce6b28d419b5aba6ba97cb0d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -831,7 +831,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 	kvm = (struct kvm *)KVM_VM_BASE;
 
 	if (kvm_vcpu_is_bsp(vcpu)) {
-		for (i = 0; i < kvm->arch.online_vcpus; i++) {
+		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8cd2a4efe238c3dbb24674305397d75c4f7fde1c..7fbedfd34d6cf87d10acde14325ecc7fff678f36 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -27,6 +27,7 @@ config KVM
 	select ANON_INODES
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
+	select KVM_APIC_ARCHITECTURE
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f1dada0519ebf8e7d7e817a6958bd1f145ae8c78..5037e170a70d2c6be842958f311b69531dd520a3 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -430,6 +430,7 @@ struct kvm_trace_rec {
 #ifdef __KVM_HAVE_PIT
 #define KVM_CAP_PIT2 33
 #endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -537,6 +538,7 @@ struct kvm_irqfd {
 #define KVM_DEASSIGN_DEV_IRQ      _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                 _IOW(KVMIO, 0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2           _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID       _IO(KVMIO, 0x78)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a5bd429e9bd3400471e06f7b903561f431a897ba..d3fdf1a738c93538be752bab4a33d3bb8cb04d9f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -131,8 +131,12 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
+#endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct mutex lock;
 	struct kvm_io_bus mmio_bus;
@@ -550,8 +554,10 @@ static inline void kvm_irqfd_release(struct kvm *kvm) {}
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
 static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->bsp_vcpu == vcpu;
 }
 #endif
+#endif
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 56c6848d2df875ef2f2deec82fc1a6fc97e79083..daece36c0a5754a26af7e216b00e1d5ce71855a4 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -9,3 +9,6 @@ config HAVE_KVM_IRQCHIP
 config HAVE_KVM_EVENTFD
        bool
        select EVENTFD
+
+config KVM_APIC_ARCHITECTURE
+       bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0d54edecbc70bcade3343a88f8524548af0d0694..25e1f9c97b1a6d0f4aa8cf412ca072cb394ac744 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -689,11 +689,6 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
 }
 #endif
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1714,24 +1709,18 @@ static struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
-	if (fd < 0)
-		kvm_put_kvm(vcpu->kvm);
-	return fd;
+	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
 }
 
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (!valid_vcpu(n))
-		return -EINVAL;
-
-	vcpu = kvm_arch_vcpu_create(kvm, n);
+	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -1742,25 +1731,38 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		return r;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->vcpus[n]) {
-		r = -EEXIST;
+	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+		r = -EINVAL;
 		goto vcpu_destroy;
 	}
-	kvm->vcpus[n] = vcpu;
-	if (n == 0)
-		kvm->bsp_vcpu = vcpu;
-	mutex_unlock(&kvm->lock);
+
+	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+		if (kvm->vcpus[r]->vcpu_id == id) {
+			r = -EEXIST;
+			goto vcpu_destroy;
+		}
+
+	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
 
 	/* Now it's all set up, let userspace reach it */
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
-	if (r < 0)
-		goto unlink;
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		goto vcpu_destroy;
+	}
+
+	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+	smp_wmb();
+	atomic_inc(&kvm->online_vcpus);
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	if (kvm->bsp_vcpu_id == id)
+		kvm->bsp_vcpu = vcpu;
+#endif
+	mutex_unlock(&kvm->lock);
 	return r;
 
-unlink:
-	mutex_lock(&kvm->lock);
-	kvm->vcpus[n] = NULL;
 vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
@@ -2233,6 +2235,15 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
 		break;
 	}
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	case KVM_SET_BOOT_CPU_ID:
+		r = 0;
+		if (atomic_read(&kvm->online_vcpus) != 0)
+			r = -EBUSY;
+		else
+			kvm->bsp_vcpu_id = arg;
+		break;
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -2299,6 +2310,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	case KVM_CAP_SET_BOOT_CPU_ID:
+#endif
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
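Usage note (illustrative, not part of the patch): a minimal userspace sketch of the new KVM_SET_BOOT_CPU_ID ioctl, assuming headers that already carry the definitions added above and a host exposing /dev/kvm; the chosen vcpu id of 1 and the bare-bones error handling are hypothetical. The ioctl has to be issued before the first KVM_CREATE_VCPU, since the kernel rejects it with -EBUSY once online_vcpus is non-zero.

/*
 * Sketch: designate vcpu id 1 as the boot CPU before creating any vcpu.
 * Assumes linux/kvm.h with KVM_SET_BOOT_CPU_ID / KVM_CAP_SET_BOOT_CPU_ID.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	if (kvm_fd < 0 || vm_fd < 0) {
		perror("kvm setup");
		return 1;
	}

	/* Only use the ioctl if the kernel advertises the capability. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID) > 0) {
		/* Must run while online_vcpus == 0, else the kernel returns -EBUSY. */
		if (ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, 1ul) < 0)
			perror("KVM_SET_BOOT_CPU_ID");
	}

	/* The vcpu whose id matches bsp_vcpu_id becomes kvm->bsp_vcpu. */
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 1);
	if (vcpu_fd < 0)
		perror("KVM_CREATE_VCPU");

	return 0;
}

The capability probe mirrors the KVM_CAP_SET_BOOT_CPU_ID case added to kvm_dev_ioctl_check_extension_generic(); on kernels without it, userspace simply falls back to the previous convention where the vcpu created with id 0 acts as the BSP.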