Commit 60911731 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Unregister cpufreq notifier on unload
  KVM: x86: release time_page on vcpu destruction
  KVM: Fix overlapping check for memory slots
  KVM: MMU: disable global page optimization
  KVM: ia64: fix locking order entering guest
  KVM: MMU: Fix off-by-one calculating large page count
@@ -610,20 +610,22 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int r;
 
 again:
-        preempt_disable();
-        local_irq_disable();
-
         if (signal_pending(current)) {
-                local_irq_enable();
-                preempt_enable();
                 r = -EINTR;
                 kvm_run->exit_reason = KVM_EXIT_INTR;
                 goto out;
         }
 
+        /*
+         * down_read() may sleep and return with interrupts enabled
+         */
+        down_read(&vcpu->kvm->slots_lock);
+
+        preempt_disable();
+        local_irq_disable();
+
         vcpu->guest_mode = 1;
         kvm_guest_enter();
-        down_read(&vcpu->kvm->slots_lock);
         r = vti_vcpu_run(vcpu, kvm_run);
         if (r < 0) {
                 local_irq_enable();
......
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
         sp->gfn = gfn;
         sp->role = role;
-        sp->global = role.cr4_pge;
+        sp->global = 0;
         hlist_add_head(&sp->hash_link, bucket);
         if (!direct) {
                 if (rmap_write_protect(vcpu->kvm, gfn))
......
@@ -2775,6 +2775,9 @@ int kvm_arch_init(void *opaque)
 
 void kvm_arch_exit(void)
 {
+        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+                cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+                                            CPUFREQ_TRANSITION_NOTIFIER);
         kvm_x86_ops = NULL;
         kvm_mmu_module_exit();
 }
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+        if (vcpu->arch.time_page) {
+                kvm_release_page_dirty(vcpu->arch.time_page);
+                vcpu->arch.time_page = NULL;
+        }
+
         kvm_x86_ops->vcpu_free(vcpu);
 }
 
......
@@ -409,6 +409,8 @@ struct kvm_trace_rec {
 #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
 #define KVM_CAP_DEVICE_DEASSIGNMENT 27
 #endif
+/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
+#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
 
 #ifdef KVM_CAP_IRQ_ROUTING
......
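As an aside, user space can probe the new capability with the generic KVM_CHECK_EXTENSION ioctl on /dev/kvm. A minimal sketch, assuming a <linux/kvm.h> new enough to define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS, with error handling kept to the bare minimum:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
                perror("open /dev/kvm");
                return 1;
        }
        /* A positive return value means the capability is supported. */
        int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
        printf("KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: %d\n", r);
        close(kvm);
        return 0;
}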
@@ -920,6 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         int r;
         gfn_t base_gfn;
         unsigned long npages;
+        int largepages;
         unsigned long i;
         struct kvm_memory_slot *memslot;
         struct kvm_memory_slot old, new;
@@ -960,7 +961,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                 struct kvm_memory_slot *s = &kvm->memslots[i];
 
-                if (s == memslot)
+                if (s == memslot || !s->npages)
                         continue;
                 if (!((base_gfn + npages <= s->base_gfn) ||
                       (base_gfn >= s->base_gfn + s->npages)))
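For reference, a small userspace sketch of the overlap test above (slot values made up for the example) shows the false positive the unfixed code produced against a deleted slot that still carries a stale base_gfn but has npages == 0; the added "|| !s->npages" condition simply skips such slots:

#include <stdio.h>

/* Userspace illustration, not kernel code. */
struct slot {
        unsigned long base_gfn;
        unsigned long npages;
};

/* Same predicate as the kernel check: nonzero means "regions overlap". */
static int overlaps(unsigned long base_gfn, unsigned long npages,
                    const struct slot *s)
{
        return !((base_gfn + npages <= s->base_gfn) ||
                 (base_gfn >= s->base_gfn + s->npages));
}

int main(void)
{
        /* A deleted slot: npages == 0, base_gfn left over from before. */
        struct slot deleted = { .base_gfn = 0x100, .npages = 0 };

        /* A new region crossing the stale base_gfn: the bare predicate
         * reports an overlap even though the slot is empty. */
        printf("overlap reported: %d\n", overlaps(0xff, 0x10, &deleted));
        return 0;
}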
@@ -995,11 +996,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                         new.userspace_addr = 0;
         }
         if (npages && !new.lpage_info) {
-                int largepages = npages / KVM_PAGES_PER_HPAGE;
-                if (npages % KVM_PAGES_PER_HPAGE)
-                        largepages++;
-                if (base_gfn % KVM_PAGES_PER_HPAGE)
-                        largepages++;
+                largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+                largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
 
                 new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
 
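For reference, the fixed expression counts "index of the last large page touched minus index of the first, plus one". A standalone sketch comparing both formulas, with 512 assumed for KVM_PAGES_PER_HPAGE (x86 with 4 KB base pages and 2 MB large pages) and base_gfn = 1, npages = 1 as example inputs:

#include <stdio.h>

/* Assumed value for illustration only. */
#define KVM_PAGES_PER_HPAGE 512UL

int main(void)
{
        unsigned long base_gfn = 1, npages = 1;   /* example inputs */

        /* Old calculation: one per partial large page at either end, which
         * double-counts when both ends fall within the same large page. */
        unsigned long old_count = npages / KVM_PAGES_PER_HPAGE;
        if (npages % KVM_PAGES_PER_HPAGE)
                old_count++;
        if (base_gfn % KVM_PAGES_PER_HPAGE)
                old_count++;

        /* New calculation: last large-page index minus first, plus one. */
        unsigned long new_count = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE
                                    - base_gfn / KVM_PAGES_PER_HPAGE;

        printf("old = %lu, new = %lu\n", old_count, new_count); /* old = 2, new = 1 */
        return 0;
}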
@@ -1985,6 +1983,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
         switch (arg) {
         case KVM_CAP_USER_MEMORY:
         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
                 return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
         case KVM_CAP_IRQ_ROUTING:
......