Commit 5d2007eb authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: Fix pit memory leak if unable to allocate irq source id
  KVM: ia64: fix vmm_spin_{un}lock for !CONFIG_SMP
  KVM: VMX: Set IGMT bit in EPT entry
  KVM: Require the PCI subsystem
  x86: KVM guest: fix section mismatch warning in kvmclock.c
  KVM: ia64: Use guest signal mask when blocking
  KVM: MMU: increase per-vcpu rmap cache alloc size
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	---help---
......
@@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		r = -EAGAIN;
+		goto out;
 	}
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
......
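The point of the reordering: kvm_vcpu_block() can put the task to sleep, so the guest's signal mask has to be installed before any path that may block, and both exit paths now funnel through the single restore at the out: label. A minimal userspace sketch of the same save/install/restore pattern (run_guest and its surroundings are invented for illustration, not kernel API):

#include <signal.h>
#include <stdio.h>

static int run_guest(const sigset_t *guest_mask)
{
	sigset_t sigsaved;
	int r;

	/* Install the guest's mask up front, as the patch now does
	 * before the KVM_MP_STATE_UNINITIALIZED block. */
	sigprocmask(SIG_SETMASK, guest_mask, &sigsaved);

	r = 0;	/* ... block and/or run the vcpu here ... */

	/* Single exit path: the "out:" label in the patch. */
	sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return r;
}

int main(void)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);	/* the guest wants SIGUSR1 blocked */
	printf("run_guest() = %d\n", run_guest(&mask));
	return 0;
}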
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define MODE_IND(psr)	\
 	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
 
+#ifndef CONFIG_SMP
+#define _vmm_raw_spin_lock(x)	 do {}while(0)
+#define _vmm_raw_spin_unlock(x) do {}while(0)
+#else
 #define _vmm_raw_spin_lock(x)	\
 	do {	\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);	\
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 	do { barrier(); \
 		((spinlock_t *)x)->raw_lock.lock = 0; } \
 	while (0)
+#endif
 
 void vmm_spin_lock(spinlock_t *lock);
 void vmm_spin_unlock(spinlock_t *lock);
......
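With one processor there is nobody to spin against, so the raw lock operations compile away entirely; the do {}while(0) form keeps them valid single statements wherever a real lock would appear. A compilable userspace sketch of the pattern (CONFIG_SMP stands in for the kernel config symbol; build without -DCONFIG_SMP to get the UP expansion):

#include <stdio.h>

#ifndef CONFIG_SMP
#define _vmm_raw_spin_lock(x)	do { } while (0)
#define _vmm_raw_spin_unlock(x)	do { } while (0)
#else
/* An SMP build would spin on *(__u32 *)(x) here, as in the real macro. */
#error "this sketch only models the UP expansion"
#endif

int main(void)
{
	int lock = 0;

	_vmm_raw_spin_lock(&lock);	/* expands to nothing on UP */
	puts("critical section");
	_vmm_raw_spin_unlock(&lock);
	return 0;
}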
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void kvm_setup_secondary_clock(void)
+static void __devinit kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
......
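The __devinit annotation moves kvm_setup_secondary_clock() into .devinit.text; the warning in question is modpost complaining about a reference crossing between a regular .text function and device-init code, and putting caller and callee in the same section is the standard fix. A rough userspace model of that idea, with __devinit redefined locally so the sketch builds outside the kernel (the callee name is invented):

#include <stdio.h>

/* Stand-in for the kernel's annotation: place the function in a
 * .devinit.text-like section. */
#define __devinit __attribute__((__section__(".devinit.text")))

static void __devinit register_secondary_clock(void)	/* invented callee */
{
	puts("secondary cpu clock registered");
}

static void __devinit kvm_setup_secondary_clock(void)
{
	register_secondary_clock();	/* caller and callee share a section */
}

int main(void)
{
	kvm_setup_secondary_clock();
	return 0;
}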
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select ANON_INODES
......
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	mutex_lock(&kvm->lock);
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
 	mutex_unlock(&kvm->lock);
-	if (pit->irq_source_id < 0)
+	if (pit->irq_source_id < 0) {
+		kfree(pit);
 		return NULL;
+	}
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
......
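The new braces and kfree() close the leak named in the merge summary: when kvm_request_irq_source_id() fails, the already-allocated pit used to be abandoned by the early return NULL. A self-contained userspace sketch of the corrected error path (the stub names are invented, and the stub always fails so the cleanup path actually runs):

#include <stdio.h>
#include <stdlib.h>

struct pit {
	int irq_source_id;
};

static int request_irq_source_id(void)
{
	return -1;	/* simulate kvm_request_irq_source_id() failing */
}

static struct pit *create_pit(void)
{
	struct pit *pit = malloc(sizeof(*pit));

	if (!pit)
		return NULL;

	pit->irq_source_id = request_irq_source_id();
	if (pit->irq_source_id < 0) {
		free(pit);	/* the kfree(pit) the patch adds */
		return NULL;
	}
	return pit;
}

int main(void)
{
	printf("create_pit() = %p\n", (void *)create_pit());
	return 0;
}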
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
-				   rmap_desc_cache, 1);
+				   rmap_desc_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
......
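Raising the minimum fill from 1 to 4 gives a fault path room to consume several rmap descriptors before the per-vcpu cache runs dry. A sketch of the topup idea, assuming the usual refill-to-a-minimum loop that mmu_topup_memory_cache() implements (the structures here are invented for illustration):

#include <stdlib.h>

#define CACHE_CAP 16

struct obj_cache {
	void *objs[CACHE_CAP];
	int nobjs;
};

/* Refill the cache until it holds at least `min` objects, so a
 * subsequent non-sleeping fault path can draw from it safely. */
static int topup(struct obj_cache *cache, int min)
{
	while (cache->nobjs < min) {
		void *obj = malloc(64);

		if (!obj)
			return -1;	/* -ENOMEM in the kernel */
		cache->objs[cache->nobjs++] = obj;
	}
	return 0;
}

int main(void)
{
	struct obj_cache rmap_desc_cache = { .nobjs = 0 };

	return topup(&rmap_desc_cache, 4);	/* was 1 before the patch */
}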
@@ -3564,7 +3564,8 @@ static int __init vmx_init(void)
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
+			VMX_EPT_IGMT_BIT);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 			VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
......
@@ -352,6 +352,7 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
+#define VMX_EPT_IGMT_BIT			(1ull << 6)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul
......
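Reading the two VMX hunks together: the newly defined VMX_EPT_IGMT_BIT (bit 6 of an EPT entry, which makes the CPU ignore the guest's own memory type in favor of the one supplied by the EPT entry) is ORed into the base PTE bits next to the memory-type field. A worked example of the resulting value, assuming VMX_EPT_MT_EPTE_SHIFT is 3 and VMX_EPT_DEFAULT_MT is 6 (write-back), as in the headers of this era:

#include <stdio.h>

#define VMX_EPT_READABLE_MASK	0x1ull
#define VMX_EPT_WRITABLE_MASK	0x2ull
#define VMX_EPT_DEFAULT_MT	6ull		/* assumed: write-back */
#define VMX_EPT_MT_EPTE_SHIFT	3		/* assumed: MT field at bits 5:3 */
#define VMX_EPT_IGMT_BIT	(1ull << 6)

int main(void)
{
	/* Same expression the patched vmx_init() passes to
	 * kvm_mmu_set_base_ptes(). */
	unsigned long long base = VMX_EPT_READABLE_MASK |
				  VMX_EPT_WRITABLE_MASK |
				  VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
				  VMX_EPT_IGMT_BIT;

	printf("base EPT PTE bits: 0x%llx\n", base);	/* prints 0x73 */
	return 0;
}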