/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	/* This is taken to safely inject irq inside the guest. When
	 * the interrupt injection (or the ioapic code) uses a
	 * finer-grained lock, update this
	 */
	mutex_lock(&assigned_dev->kvm->lock);
	kvm_set_irq(assigned_dev->kvm,
		    assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 1);
	mutex_unlock(&assigned_dev->kvm->lock);
	kvm_put_kvm(assigned_dev->kvm);
}

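/*
 * Host-side handler for an assigned device's interrupt: mask the line with
 * disable_irq_nosync() and defer the actual guest injection to the work
 * item above, since kvm_set_irq() is called under kvm->lock.
 */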
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	kvm_get_kvm(assigned_dev->kvm);
	schedule_work(&assigned_dev->interrupt_work);
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);
	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
	enable_irq(dev->host_irq);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested_type)
		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
	kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);

	if (cancel_work_sync(&assigned_dev->interrupt_work))
		/* We had pending work. That means we will have to take
		 * care of kvm_put_kvm.
		 */
		kvm_put_kvm(kvm);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_update_intx(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	adev->guest_irq = airq->guest_irq;
	adev->ack_notifier.gsi = airq->guest_irq;

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
		return 0;

	if (irqchip_in_kernel(kvm)) {
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if (airq->host_irq)
			adev->host_irq = airq->host_irq;
		else
			adev->host_irq = adev->dev->irq;

		/* Even though this is PCI, we don't want to use shared
		 * interrupts. Sharing host devices with guest-assigned devices
		 * on the same interrupt line is not a happy situation: there
		 * are going to be long delays in accepting, acking, etc.
		 */
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
				0, "kvm_assigned_intx_device", (void *)adev))
			return -EIO;
	}

	adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
				   KVM_ASSIGNED_DEV_HOST_INTX;
	return 0;
}

static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq
				   *assigned_irq)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

	if (!match->irq_requested_type) {
		INIT_WORK(&match->interrupt_work,
				kvm_assigned_dev_interrupt_work_handler);
		if (irqchip_in_kernel(kvm)) {
			/* Register ack notifier */
			match->ack_notifier.gsi = -1;
			match->ack_notifier.irq_acked =
					kvm_assigned_dev_ack_irq;
			kvm_register_irq_ack_notifier(kvm,
					&match->ack_notifier);

			/* Request IRQ source ID */
			r = kvm_request_irq_source_id(kvm);
			if (r < 0)
				goto out_release;
			else
				match->irq_source_id = r;
		}
	}

	r = assigned_device_update_intx(kvm, match, assigned_irq);
	if (r)
		goto out_release;

	mutex_unlock(&kvm->lock);
	return r;
out_release:
	mutex_unlock(&kvm->lock);
	kvm_free_assigned_device(kvm, match);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EINVAL;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->dev = dev;

	match->kvm = kvm;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		r = kvm_iommu_map_guest(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

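/*
 * Request a TLB flush from every vcpu: set KVM_REQ_TLB_FLUSH in each vcpu's
 * request set and IPI the CPUs currently running vcpus (ack_flush itself is
 * a no-op; the IPI is what kicks a vcpu out of guest mode so it notices the
 * request).
 */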
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}


int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

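/*
 * Translate a guest frame number to a host pfn.  The common case goes
 * through get_user_pages_fast() on the slot's userspace mapping; if that
 * fails and the address falls in a VM_PFNMAP vma (typically device memory),
 * the pfn is computed directly from vma->vm_pgoff.
 */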
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

        while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static const struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static const struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
1860
		kvm_rebooting = true;
1861
		on_each_cpu(hardware_disable, NULL, 1);
1862 1863 1864 1865 1866 1867 1868 1869 1870
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

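/* Find the device on @bus that claims @addr via its in_range() callback. */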
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
1972
	debugfs_remove(kvm_debugfs_dir);
A
Avi Kivity 已提交
1973 1974
}

1975 1976
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
A
Avi Kivity 已提交
1977
	hardware_disable(NULL);
1978 1979 1980 1981 1982
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
A
Avi Kivity 已提交
1983
	hardware_enable(NULL);
1984 1985 1986 1987
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
1988
	.name = "kvm",
1989 1990 1991 1992 1993 1994 1995 1996 1997
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

1998
struct page *bad_page;
1999
pfn_t bad_pfn;
A
Avi Kivity 已提交
2000

2001 2002 2003 2004 2005 2006 2007 2008 2009 2010
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);