kvm_main.c 54.1 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

18
#include "iodev.h"
A
Avi Kivity 已提交
19

20
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
21 22 23 24 25 26 27 28 29 30 31 32
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
33
#include <linux/sysdev.h>
A
Avi Kivity 已提交
34
#include <linux/cpu.h>
A
Alexey Dobriyan 已提交
35
#include <linux/sched.h>
36 37
#include <linux/cpumask.h>
#include <linux/smp.h>
38
#include <linux/anon_inodes.h>
39
#include <linux/profile.h>
40
#include <linux/kvm_para.h>
41
#include <linux/pagemap.h>
42
#include <linux/mman.h>
43
#include <linux/swap.h>
A
Avi Kivity 已提交
44

A
Avi Kivity 已提交
45 46 47
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
48
#include <asm/pgtable.h>
A
Avi Kivity 已提交
49

50 51 52 53
#ifdef CONFIG_X86
#include <asm/msidef.h>
#endif

54 55 56 57
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

58 59 60 61 62 63
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

A
Avi Kivity 已提交
64 65 66
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

S
Sheng Yang 已提交
67 68 69
static int msi2intx = 1;
module_param(msi2intx, bool, 0);

70 71
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
72

73
static cpumask_var_t cpus_hardware_enabled;
74

75 76
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
A
Avi Kivity 已提交
77

78 79
static __read_mostly struct preempt_ops kvm_preempt_ops;

80
struct dentry *kvm_debugfs_dir;
A
Avi Kivity 已提交
81

A
Avi Kivity 已提交
82 83 84
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

H
Hannes Eder 已提交
85
static bool kvm_rebooting;
86

87
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138

#ifdef CONFIG_X86
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
{
	int vcpu_id;
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
	int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
			>> MSI_ADDR_DEST_ID_SHIFT;
	int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
			>> MSI_DATA_VECTOR_SHIFT;
	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
				(unsigned long *)&dev->guest_msi.address_lo);
	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
				(unsigned long *)&dev->guest_msi.data);
	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
				(unsigned long *)&dev->guest_msi.data);
	u32 deliver_bitmask;

	BUG_ON(!ioapic);

	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
				dest_id, dest_mode);
	/* IOAPIC delivery mode value is the same as MSI here */
	switch (delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
				deliver_bitmask);
		if (vcpu != NULL)
			kvm_apic_set_irq(vcpu, vector, trig_mode);
		else
			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
		break;
	case IOAPIC_FIXED:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				kvm_apic_set_irq(vcpu, vector, trig_mode);
		}
		break;
	default:
		printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
	}
}
#else
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
#endif

139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	/* This is taken to safely inject irq inside the guest. When
	 * the interrupt injection (or the ioapic code) uses a
	 * finer-grained lock, update this
	 */
	mutex_lock(&assigned_dev->kvm->lock);
S
Sheng Yang 已提交
165 166 167 168 169 170 171 172
	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
		kvm_set_irq(assigned_dev->kvm,
			    assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);
	else if (assigned_dev->irq_requested_type &
				KVM_ASSIGNED_DEV_GUEST_MSI) {
		assigned_device_msi_dispatch(assigned_dev);
		enable_irq(assigned_dev->host_irq);
173
		assigned_dev->host_irq_disabled = false;
S
Sheng Yang 已提交
174
	}
175 176 177 178 179 180 181 182 183
	mutex_unlock(&assigned_dev->kvm->lock);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	schedule_work(&assigned_dev->interrupt_work);
184

185
	disable_irq_nosync(irq);
186 187
	assigned_dev->host_irq_disabled = true;

188 189 190 191 192 193 194 195 196 197 198 199 200
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);
201

202
	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
203 204 205 206 207 208 209 210

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
211 212
}

213
/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
214 215
static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
216
{
217 218
	if (!irqchip_in_kernel(kvm))
		return;
219

220
	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
221 222 223 224

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
225

226 227 228
	if (!assigned_dev->irq_requested_type)
		return;

229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
	/*
	 * In kvm_free_device_irq, cancel_work_sync return true if:
	 * 1. work is scheduled, and then cancelled.
	 * 2. work callback is executed.
	 *
	 * The first one ensured that the irq is disabled and no more events
	 * would happen. But for the second one, the irq may be enabled (e.g.
	 * for MSI). So we disable irq here to prevent further events.
	 *
	 * Notice this maybe result in nested disable if the interrupt type is
	 * INTx, but it's OK for we are going to free it.
	 *
	 * If this function is a part of VM destroy, please ensure that till
	 * now, the kvm state is still legal for probably we also have to wait
	 * interrupt_work done.
	 */
	disable_irq_nosync(assigned_dev->host_irq);
	cancel_work_sync(&assigned_dev->interrupt_work);
247

248 249 250 251 252 253 254 255 256 257 258 259 260 261 262
	free_irq(assigned_dev->host_irq, (void *)assigned_dev);

	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		pci_disable_msi(assigned_dev->dev);

	assigned_dev->irq_requested_type = 0;
}


static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

263 264
	pci_reset_function(assigned_dev->dev);

265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

287 288 289 290
static int assigned_device_update_intx(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
291 292 293 294
	adev->guest_irq = airq->guest_irq;
	adev->ack_notifier.gsi = airq->guest_irq;

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
295 296 297
		return 0;

	if (irqchip_in_kernel(kvm)) {
S
Sheng Yang 已提交
298
		if (!msi2intx &&
S
Sheng Yang 已提交
299 300
		    (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
			free_irq(adev->host_irq, (void *)adev);
S
Sheng Yang 已提交
301 302 303
			pci_disable_msi(adev->dev);
		}

304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if (airq->host_irq)
			adev->host_irq = airq->host_irq;
		else
			adev->host_irq = adev->dev->irq;

		/* Even though this is PCI, we don't want to use shared
		 * interrupts. Sharing host devices with guest-assigned devices
		 * on the same interrupt line is not a happy situation: there
		 * are going to be long delays in accepting, acking, etc.
		 */
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
				0, "kvm_assigned_intx_device", (void *)adev))
			return -EIO;
	}

322 323
	adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
				   KVM_ASSIGNED_DEV_HOST_INTX;
324 325 326
	return 0;
}

S
Sheng Yang 已提交
327 328 329 330 331 332 333
#ifdef CONFIG_X86
static int assigned_device_update_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	int r;

S
Sheng Yang 已提交
334 335 336 337 338 339 340 341 342 343 344 345
	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
		/* x86 don't care upper address of guest msi message addr */
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
		adev->guest_msi.data = airq->guest_msi.data;
		adev->ack_notifier.gsi = -1;
	} else if (msi2intx) {
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->guest_irq = airq->guest_irq;
		adev->ack_notifier.gsi = airq->guest_irq;
346 347 348 349 350 351 352 353
	} else {
		/*
		 * Guest require to disable device MSI, we disable MSI and
		 * re-enable INTx by default again. Notice it's only for
		 * non-msi2intx.
		 */
		assigned_device_update_intx(kvm, adev, airq);
		return 0;
S
Sheng Yang 已提交
354
	}
S
Sheng Yang 已提交
355 356 357 358 359

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		return 0;

	if (irqchip_in_kernel(kvm)) {
S
Sheng Yang 已提交
360 361 362 363 364 365 366 367 368
		if (!msi2intx) {
			if (adev->irq_requested_type &
					KVM_ASSIGNED_DEV_HOST_INTX)
				free_irq(adev->host_irq, (void *)adev);

			r = pci_enable_msi(adev->dev);
			if (r)
				return r;
		}
S
Sheng Yang 已提交
369 370 371 372 373 374 375

		adev->host_irq = adev->dev->irq;
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
				"kvm_assigned_msi_device", (void *)adev))
			return -EIO;
	}

S
Sheng Yang 已提交
376 377 378 379
	if (!msi2intx)
		adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

	adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
S
Sheng Yang 已提交
380 381 382 383
	return 0;
}
#endif

384 385 386 387 388 389
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq
				   *assigned_irq)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
390
	u32 current_flags = 0, changed_flags;
391 392 393 394 395 396 397 398 399 400

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

401
	if (!match->irq_requested_type) {
402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417
		INIT_WORK(&match->interrupt_work,
				kvm_assigned_dev_interrupt_work_handler);
		if (irqchip_in_kernel(kvm)) {
			/* Register ack nofitier */
			match->ack_notifier.gsi = -1;
			match->ack_notifier.irq_acked =
					kvm_assigned_dev_ack_irq;
			kvm_register_irq_ack_notifier(kvm,
					&match->ack_notifier);

			/* Request IRQ source ID */
			r = kvm_request_irq_source_id(kvm);
			if (r < 0)
				goto out_release;
			else
				match->irq_source_id = r;
S
Sheng Yang 已提交
418 419 420 421 422 423 424

#ifdef CONFIG_X86
			/* Determine host device irq type, we can know the
			 * result from dev->msi_enabled */
			if (msi2intx)
				pci_enable_msi(match->dev);
#endif
425
		}
426 427
	}

428 429 430 431 432 433 434
	if ((match->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) &&
		 (match->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI))
		current_flags |= KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;

	changed_flags = assigned_irq->flags ^ current_flags;

	if ((changed_flags & KVM_DEV_IRQ_ASSIGN_MSI_ACTION) ||
S
Sheng Yang 已提交
435
	    (msi2intx && match->dev->msi_enabled)) {
S
Sheng Yang 已提交
436 437 438 439 440 441 442 443 444 445 446 447
#ifdef CONFIG_X86
		r = assigned_device_update_msi(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"MSI device!\n");
			goto out_release;
		}
#else
		r = -ENOTTY;
#endif
	} else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
		/* Host device IRQ 0 means don't support INTx */
S
Sheng Yang 已提交
448 449 450 451 452 453 454 455 456 457
		if (!msi2intx) {
			printk(KERN_WARNING
			       "kvm: wait device to enable MSI!\n");
			r = 0;
		} else {
			printk(KERN_WARNING
			       "kvm: failed to enable MSI device!\n");
			r = -ENOTTY;
			goto out_release;
		}
S
Sheng Yang 已提交
458 459 460 461 462 463 464 465 466
	} else {
		/* Non-sharing INTx mode */
		r = assigned_device_update_intx(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"INTx device!\n");
			goto out_release;
		}
	}
467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482

	mutex_unlock(&kvm->lock);
	return r;
out_release:
	mutex_unlock(&kvm->lock);
	kvm_free_assigned_device(kvm, match);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

483
	down_read(&kvm->slots_lock);
484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EINVAL;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}
519 520 521

	pci_reset_function(dev);

522 523 524
	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
525
	match->flags = assigned_dev->flags;
526
	match->dev = dev;
527
	match->irq_source_id = -1;
528 529 530 531 532
	match->kvm = kvm;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
J
Joerg Roedel 已提交
533
		if (!kvm->arch.iommu_domain) {
W
Weidong Han 已提交
534 535 536 537 538
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
539 540 541 542 543 544
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
545
	up_read(&kvm->slots_lock);
546 547 548 549 550 551 552 553 554 555 556
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
557
	up_read(&kvm->slots_lock);
558 559 560 561
	return r;
}
#endif

W
Weidong Han 已提交
562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		  "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

591 592 593 594 595
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

596
inline int kvm_is_mmio_pfn(pfn_t pfn)
B
Ben-Ami Yassour 已提交
597 598 599 600 601 602 603
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

A
Avi Kivity 已提交
604 605 606
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
607
void vcpu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
608
{
609 610
	int cpu;

A
Avi Kivity 已提交
611
	mutex_lock(&vcpu->mutex);
612 613
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
614
	kvm_arch_vcpu_load(vcpu, cpu);
615
	put_cpu();
A
Avi Kivity 已提交
616 617
}

618
void vcpu_put(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
619
{
620
	preempt_disable();
621
	kvm_arch_vcpu_put(vcpu);
622 623
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
A
Avi Kivity 已提交
624 625 626
	mutex_unlock(&vcpu->mutex);
}

627 628 629 630
static void ack_flush(void *_completed)
{
}

631
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
632
{
633
	int i, cpu, me;
634 635
	cpumask_var_t cpus;
	bool called = true;
636 637
	struct kvm_vcpu *vcpu;

638 639 640
	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
		cpumask_clear(cpus);

641
	me = get_cpu();
R
Rusty Russell 已提交
642 643 644 645
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
646
		if (test_and_set_bit(req, &vcpu->requests))
647 648
			continue;
		cpu = vcpu->cpu;
649 650
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
651
	}
652 653 654 655 656 657
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
658
	put_cpu();
659
	free_cpumask_var(cpus);
660
	return called;
661 662
}

663
void kvm_flush_remote_tlbs(struct kvm *kvm)
664
{
665 666
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
667 668
}

669 670 671 672
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
673

R
Rusty Russell 已提交
674 675 676 677 678 679 680 681 682
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
E
Eddie Dong 已提交
683
	init_waitqueue_head(&vcpu->wq);
R
Rusty Russell 已提交
684 685 686 687 688 689 690 691

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

692
	r = kvm_arch_vcpu_init(vcpu);
R
Rusty Russell 已提交
693
	if (r < 0)
694
		goto fail_free_run;
R
Rusty Russell 已提交
695 696 697 698 699
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
700
	return r;
R
Rusty Russell 已提交
701 702 703 704 705
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
706
	kvm_arch_vcpu_uninit(vcpu);
R
Rusty Russell 已提交
707 708 709 710
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only need to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smb_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

820 821 822 823 824 825 826
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

827 828 829 830 831
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
832
	.release		= kvm_mmu_notifier_release,
833 834 835
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

836
static struct kvm *kvm_create_vm(void)
A
Avi Kivity 已提交
837
{
838
	struct kvm *kvm = kvm_arch_create_vm();
839 840 841
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif
A
Avi Kivity 已提交
842

843 844
	if (IS_ERR(kvm))
		goto out;
A
Avi Kivity 已提交
845

846 847 848 849 850 851 852 853 854 855
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

856 857 858 859 860 861 862 863 864 865 866 867 868 869 870
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

871 872
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
873
	spin_lock_init(&kvm->mmu_lock);
874
	kvm_io_bus_init(&kvm->pio_bus);
S
Shaohua Li 已提交
875
	mutex_init(&kvm->lock);
876
	kvm_io_bus_init(&kvm->mmio_bus);
877
	init_rwsem(&kvm->slots_lock);
I
Izik Eidus 已提交
878
	atomic_set(&kvm->users_count, 1);
879 880 881
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
882 883 884
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
885
out:
886 887 888
	return kvm;
}

A
Avi Kivity 已提交
889 890 891 892 893 894
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
895 896
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);
A
Avi Kivity 已提交
897 898 899 900

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

M
Marcelo Tosatti 已提交
901 902 903
	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

A
Avi Kivity 已提交
904
	free->npages = 0;
A
Al Viro 已提交
905
	free->dirty_bitmap = NULL;
906
	free->rmap = NULL;
M
Marcelo Tosatti 已提交
907
	free->lpage_info = NULL;
A
Avi Kivity 已提交
908 909
}

910
void kvm_free_physmem(struct kvm *kvm)
A
Avi Kivity 已提交
911 912 913 914
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
A
Al Viro 已提交
915
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
A
Avi Kivity 已提交
916 917
}

918 919
static void kvm_destroy_vm(struct kvm *kvm)
{
920 921
	struct mm_struct *mm = kvm->mm;

922
	kvm_arch_sync_events(kvm);
923 924 925
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
926
	kvm_io_bus_destroy(&kvm->pio_bus);
927
	kvm_io_bus_destroy(&kvm->mmio_bus);
928 929 930
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
931 932 933
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
934
#endif
935
	kvm_arch_destroy_vm(kvm);
936
	mmdrop(mm);
937 938
}

I
Izik Eidus 已提交
939 940 941 942 943 944 945 946 947 948 949 950 951 952
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


953 954 955 956
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

I
Izik Eidus 已提交
957
	kvm_put_kvm(kvm);
A
Avi Kivity 已提交
958 959 960 961 962 963 964 965
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
966
 *
967
 * Must be called holding mmap_sem for write.
A
Avi Kivity 已提交
968
 */
969 970 971
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
A
Avi Kivity 已提交
972 973 974 975 976 977 978 979 980 981 982 983 984 985
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
S
Sheng Yang 已提交
986
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
987
		goto out;
988
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
A
Avi Kivity 已提交
989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
1009
		goto out_free;
A
Avi Kivity 已提交
1010 1011 1012 1013 1014 1015 1016 1017 1018 1019

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
1020
			goto out_free;
A
Avi Kivity 已提交
1021 1022 1023 1024
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
A
Al Viro 已提交
1025
		new.dirty_bitmap = NULL;
A
Avi Kivity 已提交
1026 1027 1028 1029

	r = -ENOMEM;

	/* Allocate if a slot is being created */
1030
#ifndef CONFIG_S390
1031
	if (npages && !new.rmap) {
M
Mike Day 已提交
1032
		new.rmap = vmalloc(npages * sizeof(struct page *));
1033 1034

		if (!new.rmap)
1035
			goto out_free;
1036 1037

		memset(new.rmap, 0, npages * sizeof(*new.rmap));
1038

1039
		new.user_alloc = user_alloc;
1040 1041 1042 1043 1044 1045 1046 1047 1048
		/*
		 * hva_to_rmmap() serialzies with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
A
Avi Kivity 已提交
1049
	}
M
Marcelo Tosatti 已提交
1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}
A
Avi Kivity 已提交
1069 1070 1071 1072 1073 1074 1075

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
1076
			goto out_free;
A
Avi Kivity 已提交
1077 1078
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
1079
#endif /* not defined CONFIG_S390 */
A
Avi Kivity 已提交
1080

1081 1082 1083
	if (!npages)
		kvm_arch_flush_shadow(kvm);

1084 1085 1086 1087
	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

1088
	*memslot = new;
1089
	spin_unlock(&kvm->mmu_lock);
1090

1091 1092
	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
1093
		spin_lock(&kvm->mmu_lock);
1094
		*memslot = old;
1095
		spin_unlock(&kvm->mmu_lock);
1096
		goto out_free;
1097 1098
	}

1099 1100 1101 1102
	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	if (!npages)
		*memslot = old;
1103
#ifdef CONFIG_DMAR
B
Ben-Ami Yassour 已提交
1104 1105 1106 1107
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
1108
#endif
A
Avi Kivity 已提交
1109 1110
	return 0;

1111
out_free:
A
Avi Kivity 已提交
1112 1113 1114
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
1115 1116

}
1117 1118 1119 1120 1121 1122 1123 1124
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

1125
	down_write(&kvm->slots_lock);
1126
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
1127
	up_write(&kvm->slots_lock);
1128 1129
	return r;
}
1130 1131
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

1132 1133 1134 1135
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
1136
{
1137 1138
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
1139
	return kvm_set_memory_region(kvm, mem, user_alloc);
A
Avi Kivity 已提交
1140 1141
}

1142 1143
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
A
Avi Kivity 已提交
1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

1159
	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
A
Avi Kivity 已提交
1160

1161
	for (i = 0; !any && i < n/sizeof(long); ++i)
A
Avi Kivity 已提交
1162 1163 1164 1165 1166 1167
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

1168 1169
	if (any)
		*is_dirty = 1;
A
Avi Kivity 已提交
1170 1171 1172 1173 1174 1175

	r = 0;
out:
	return r;
}

1176 1177 1178 1179 1180 1181
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

1182 1183 1184 1185 1186 1187
int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

I
Izik Eidus 已提交
1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198
static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

1199
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
A
Avi Kivity 已提交
1200 1201 1202 1203 1204 1205 1206 1207 1208 1209
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
A
Al Viro 已提交
1210
	return NULL;
A
Avi Kivity 已提交
1211
}
1212
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
1213 1214 1215 1216

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
1217
	return gfn_to_memslot_unaliased(kvm, gfn);
1218
}
A
Avi Kivity 已提交
1219

1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

M
Marcelo Tosatti 已提交
1236
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
I
Izik Eidus 已提交
1237 1238 1239 1240
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
1241
	slot = gfn_to_memslot_unaliased(kvm, gfn);
I
Izik Eidus 已提交
1242 1243 1244 1245
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
1246
EXPORT_SYMBOL_GPL(gfn_to_hva);
I
Izik Eidus 已提交
1247

1248
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
A
Avi Kivity 已提交
1249
{
1250
	struct page *page[1];
I
Izik Eidus 已提交
1251
	unsigned long addr;
1252
	int npages;
1253
	pfn_t pfn;
A
Avi Kivity 已提交
1254

1255 1256
	might_sleep();

I
Izik Eidus 已提交
1257 1258
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
1259
		get_page(bad_page);
1260
		return page_to_pfn(bad_page);
1261
	}
1262

1263
	npages = get_user_pages_fast(addr, 1, 1, page);
I
Izik Eidus 已提交
1264

1265 1266 1267
	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

1268
		down_read(&current->mm->mmap_sem);
1269
		vma = find_vma(current->mm, addr);
1270

1271 1272
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
1273
			up_read(&current->mm->mmap_sem);
1274 1275 1276 1277 1278
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1279
		up_read(&current->mm->mmap_sem);
1280
		BUG_ON(!kvm_is_mmio_pfn(pfn));
1281 1282
	} else
		pfn = page_to_pfn(page[0]);
1283

1284
	return pfn;
1285 1286 1287 1288 1289 1290
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
1291 1292 1293
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
1294
	if (!kvm_is_mmio_pfn(pfn))
1295 1296
		return pfn_to_page(pfn);

1297
	WARN_ON(kvm_is_mmio_pfn(pfn));
1298 1299 1300

	get_page(bad_page);
	return bad_page;
A
Avi Kivity 已提交
1301
}
1302

A
Avi Kivity 已提交
1303 1304
EXPORT_SYMBOL_GPL(gfn_to_page);

1305 1306
void kvm_release_page_clean(struct page *page)
{
1307
	kvm_release_pfn_clean(page_to_pfn(page));
1308 1309 1310
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

1311 1312
void kvm_release_pfn_clean(pfn_t pfn)
{
1313
	if (!kvm_is_mmio_pfn(pfn))
1314
		put_page(pfn_to_page(pfn));
1315 1316 1317
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

1318
void kvm_release_page_dirty(struct page *page)
1319
{
1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
1339
	if (!kvm_is_mmio_pfn(pfn)) {
1340 1341 1342 1343
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
1344
}
1345 1346 1347 1348
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
1349
	if (!kvm_is_mmio_pfn(pfn))
1350
		mark_page_accessed(pfn_to_page(pfn));
1351 1352 1353 1354 1355
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
1356
	if (!kvm_is_mmio_pfn(pfn))
1357
		get_page(pfn_to_page(pfn));
1358 1359
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
1360

1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
1372 1373
	int r;
	unsigned long addr;
1374

1375 1376 1377 1378 1379
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
1416
	pagefault_disable();
1417
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1418
	pagefault_enable();
1419 1420 1421 1422 1423 1424
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

1425 1426 1427
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
1428 1429
	int r;
	unsigned long addr;
1430

1431 1432 1433 1434 1435
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
1464
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

        while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

A
Avi Kivity 已提交
1487 1488
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
1489
	struct kvm_memory_slot *memslot;
A
Avi Kivity 已提交
1490

1491
	gfn = unalias_gfn(kvm, gfn);
1492
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
R
Rusty Russell 已提交
1493 1494
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
A
Avi Kivity 已提交
1495

R
Rusty Russell 已提交
1496 1497 1498
		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
A
Avi Kivity 已提交
1499 1500 1501
	}
}

E
Eddie Dong 已提交
1502 1503 1504
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
1505
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1506
{
1507 1508 1509 1510 1511
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

1512 1513 1514 1515
		if (kvm_cpu_has_interrupt(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
1516
			break;
1517
		}
1518 1519 1520
		if (signal_pending(current))
			break;

E
Eddie Dong 已提交
1521 1522 1523 1524
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}
1525

1526
	finish_wait(&vcpu->wq, &wait);
E
Eddie Dong 已提交
1527 1528
}

A
Avi Kivity 已提交
1529 1530
void kvm_resched(struct kvm_vcpu *vcpu)
{
1531 1532
	if (!need_resched())
		return;
A
Avi Kivity 已提交
1533 1534 1535 1536
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

1537
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1538 1539 1540 1541
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

1542
	if (vmf->pgoff == 0)
1543
		page = virt_to_page(vcpu->run);
A
Avi Kivity 已提交
1544
#ifdef CONFIG_X86
1545
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1546
		page = virt_to_page(vcpu->arch.pio_data);
1547 1548 1549 1550
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
A
Avi Kivity 已提交
1551
#endif
1552
	else
1553
		return VM_FAULT_SIGBUS;
1554
	get_page(page);
1555 1556
	vmf->page = page;
	return 0;
1557 1558 1559
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
1560
	.fault = kvm_vcpu_fault,
1561 1562 1563 1564 1565 1566 1567 1568
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

A
Avi Kivity 已提交
1569 1570 1571 1572
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

A
Al Viro 已提交
1573
	kvm_put_kvm(vcpu->kvm);
A
Avi Kivity 已提交
1574 1575 1576
	return 0;
}

1577
static struct file_operations kvm_vcpu_fops = {
A
Avi Kivity 已提交
1578 1579 1580
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
1581
	.mmap           = kvm_vcpu_mmap,
A
Avi Kivity 已提交
1582 1583 1584 1585 1586 1587 1588
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
1589
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
A
Al Viro 已提交
1590
	if (fd < 0)
A
Al Viro 已提交
1591
		kvm_put_kvm(vcpu->kvm);
A
Avi Kivity 已提交
1592 1593 1594
	return fd;
}

1595 1596 1597 1598 1599 1600 1601 1602 1603
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
R
Rusty Russell 已提交
1604
		return -EINVAL;
1605

1606
	vcpu = kvm_arch_vcpu_create(kvm, n);
R
Rusty Russell 已提交
1607 1608
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);
1609

1610 1611
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

1612 1613
	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
1614
		return r;
1615

S
Shaohua Li 已提交
1616
	mutex_lock(&kvm->lock);
R
Rusty Russell 已提交
1617 1618
	if (kvm->vcpus[n]) {
		r = -EEXIST;
1619
		goto vcpu_destroy;
R
Rusty Russell 已提交
1620 1621
	}
	kvm->vcpus[n] = vcpu;
S
Shaohua Li 已提交
1622
	mutex_unlock(&kvm->lock);
1623

R
Rusty Russell 已提交
1624
	/* Now it's all set up, let userspace reach it */
A
Al Viro 已提交
1625
	kvm_get_kvm(kvm);
A
Avi Kivity 已提交
1626 1627
	r = create_vcpu_fd(vcpu);
	if (r < 0)
R
Rusty Russell 已提交
1628 1629
		goto unlink;
	return r;
1630

R
Rusty Russell 已提交
1631
unlink:
S
Shaohua Li 已提交
1632
	mutex_lock(&kvm->lock);
R
Rusty Russell 已提交
1633
	kvm->vcpus[n] = NULL;
1634
vcpu_destroy:
1635
	mutex_unlock(&kvm->lock);
1636
	kvm_arch_vcpu_destroy(vcpu);
1637 1638 1639
	return r;
}

A
Avi Kivity 已提交
1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

A
Avi Kivity 已提交
1651 1652
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
A
Avi Kivity 已提交
1653
{
A
Avi Kivity 已提交
1654
	struct kvm_vcpu *vcpu = filp->private_data;
A
Al Viro 已提交
1655
	void __user *argp = (void __user *)arg;
1656
	int r;
1657 1658
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;
A
Avi Kivity 已提交
1659

1660 1661
	if (vcpu->kvm->mm != current->mm)
		return -EIO;
A
Avi Kivity 已提交
1662
	switch (ioctl) {
1663
	case KVM_RUN:
1664 1665 1666
		r = -EINVAL;
		if (arg)
			goto out;
1667
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
A
Avi Kivity 已提交
1668 1669
		break;
	case KVM_GET_REGS: {
1670
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
1671

1672 1673 1674
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
A
Avi Kivity 已提交
1675
			goto out;
1676 1677 1678
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
A
Avi Kivity 已提交
1679
		r = -EFAULT;
1680 1681
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
A
Avi Kivity 已提交
1682
		r = 0;
1683 1684
out_free1:
		kfree(kvm_regs);
A
Avi Kivity 已提交
1685 1686 1687
		break;
	}
	case KVM_SET_REGS: {
1688
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
1689

1690 1691 1692
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
A
Avi Kivity 已提交
1693
			goto out;
1694 1695 1696 1697
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
A
Avi Kivity 已提交
1698
		if (r)
1699
			goto out_free2;
A
Avi Kivity 已提交
1700
		r = 0;
1701 1702
out_free2:
		kfree(kvm_regs);
A
Avi Kivity 已提交
1703 1704 1705
		break;
	}
	case KVM_GET_SREGS: {
1706 1707 1708 1709 1710
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
1711 1712 1713
		if (r)
			goto out;
		r = -EFAULT;
1714
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
A
Avi Kivity 已提交
1715 1716 1717 1718 1719
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
1720 1721 1722 1723
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
A
Avi Kivity 已提交
1724
		r = -EFAULT;
1725
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
A
Avi Kivity 已提交
1726
			goto out;
1727
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
1728 1729 1730 1731 1732
		if (r)
			goto out;
		r = 0;
		break;
	}
1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1757 1758 1759 1760
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
A
Al Viro 已提交
1761
		if (copy_from_user(&tr, argp, sizeof tr))
A
Avi Kivity 已提交
1762
			goto out;
1763
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
A
Avi Kivity 已提交
1764 1765 1766
		if (r)
			goto out;
		r = -EFAULT;
A
Al Viro 已提交
1767
		if (copy_to_user(argp, &tr, sizeof tr))
A
Avi Kivity 已提交
1768 1769 1770 1771
			goto out;
		r = 0;
		break;
	}
J
Jan Kiszka 已提交
1772 1773
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;
A
Avi Kivity 已提交
1774 1775

		r = -EFAULT;
A
Al Viro 已提交
1776
		if (copy_from_user(&dbg, argp, sizeof dbg))
A
Avi Kivity 已提交
1777
			goto out;
J
Jan Kiszka 已提交
1778
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
A
Avi Kivity 已提交
1779 1780 1781 1782 1783
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
A
Avi Kivity 已提交
1807
	case KVM_GET_FPU: {
1808 1809 1810 1811 1812
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
A
Avi Kivity 已提交
1813 1814 1815
		if (r)
			goto out;
		r = -EFAULT;
1816
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
A
Avi Kivity 已提交
1817 1818 1819 1820 1821
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
1822 1823 1824 1825
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
A
Avi Kivity 已提交
1826
		r = -EFAULT;
1827
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
A
Avi Kivity 已提交
1828
			goto out;
1829
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
A
Avi Kivity 已提交
1830 1831 1832 1833 1834
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1835
	default:
1836
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
1837 1838
	}
out:
1839 1840
	kfree(fpu);
	kfree(kvm_sregs);
A
Avi Kivity 已提交
1841 1842 1843 1844 1845 1846 1847 1848
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
1849
	int r;
A
Avi Kivity 已提交
1850

1851 1852
	if (kvm->mm != current->mm)
		return -EIO;
A
Avi Kivity 已提交
1853 1854 1855 1856 1857 1858
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
1859 1860 1861 1862 1863 1864 1865 1866 1867
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
A
Avi Kivity 已提交
1868 1869 1870 1871 1872 1873 1874 1875
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
A
Al Viro 已提交
1876
		if (copy_from_user(&log, argp, sizeof log))
A
Avi Kivity 已提交
1877
			goto out;
1878
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
A
Avi Kivity 已提交
1879 1880 1881 1882
		if (r)
			goto out;
		break;
	}
1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
W
Weidong Han 已提交
1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
1945
#endif
1946
	default:
1947
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1948 1949 1950 1951 1952
	}
out:
	return r;
}

1953
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1954
{
1955 1956 1957 1958
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
1959 1960
	struct kvm *kvm = vma->vm_file->private_data;

1961 1962
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
1963
		return VM_FAULT_SIGBUS;
1964 1965 1966 1967

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
1968
		return VM_FAULT_SIGBUS;
1969 1970

	vmf->page = page[0];
1971
	return 0;
1972 1973 1974
}

static struct vm_operations_struct kvm_vm_vm_ops = {
1975
	.fault = kvm_vm_fault,
1976 1977 1978 1979 1980 1981 1982 1983
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

1984
static struct file_operations kvm_vm_fops = {
1985 1986 1987 1988 1989 1990 1991 1992
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
A
Al Viro 已提交
1993
	int fd;
1994 1995 1996
	struct kvm *kvm;

	kvm = kvm_create_vm();
1997 1998
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
1999
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
A
Al Viro 已提交
2000
	if (fd < 0)
A
Al Viro 已提交
2001
		kvm_put_kvm(kvm);
2002 2003 2004 2005

	return fd;
}

2006 2007 2008
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
2009
	case KVM_CAP_USER_MEMORY:
2010 2011 2012 2013 2014 2015 2016 2017
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
		return 1;
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

2018 2019 2020
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
2021
	long r = -EINVAL;
2022 2023 2024

	switch (ioctl) {
	case KVM_GET_API_VERSION:
2025 2026 2027
		r = -EINVAL;
		if (arg)
			goto out;
2028 2029 2030
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
2031 2032 2033
		r = -EINVAL;
		if (arg)
			goto out;
2034 2035
		r = kvm_dev_ioctl_create_vm();
		break;
2036
	case KVM_CHECK_EXTENSION:
2037
		r = kvm_dev_ioctl_check_extension_generic(arg);
2038
		break;
2039 2040 2041 2042
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
2043 2044 2045
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
2046 2047 2048
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
2049
#endif
2050
		break;
2051 2052 2053 2054 2055
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
A
Avi Kivity 已提交
2056
	default:
2057
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
A
Avi Kivity 已提交
2069
	KVM_MINOR,
A
Avi Kivity 已提交
2070 2071 2072 2073
	"kvm",
	&kvm_chardev_ops,
};

2074 2075 2076 2077
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

2078
	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2079
		return;
2080
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
2081
	kvm_arch_hardware_enable(NULL);
2082 2083 2084 2085 2086 2087
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

2088
	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2089
		return;
2090
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2091
	kvm_arch_hardware_disable(NULL);
2092 2093
}

A
Avi Kivity 已提交
2094 2095 2096 2097 2098
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

2099
	val &= ~CPU_TASKS_FROZEN;
A
Avi Kivity 已提交
2100
	switch (val) {
2101
	case CPU_DYING:
2102 2103 2104 2105
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
A
Avi Kivity 已提交
2106
	case CPU_UP_CANCELED:
2107 2108
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
2109
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
A
Avi Kivity 已提交
2110
		break;
2111 2112 2113
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
2114
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
A
Avi Kivity 已提交
2115 2116 2117 2118 2119
		break;
	}
	return NOTIFY_OK;
}

2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

2132
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
M
Mike Day 已提交
2133
		      void *v)
2134 2135 2136 2137 2138 2139 2140
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2141
		kvm_rebooting = true;
2142
		on_each_cpu(hardware_disable, NULL, 1);
2143 2144 2145 2146 2147 2148 2149 2150 2151
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

2168 2169
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
2170 2171 2172 2173 2174 2175
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

2176
		if (pos->in_range(pos, addr, len, is_write))
2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

A
Avi Kivity 已提交
2190 2191 2192 2193 2194
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

2195
static int vm_stat_get(void *_offset, u64 *val)
2196 2197 2198 2199
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

2200
	*val = 0;
2201 2202
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
2203
		*val += *(u32 *)((void *)kvm + offset);
2204
	spin_unlock(&kvm_lock);
2205
	return 0;
2206 2207 2208 2209
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

2210
static int vcpu_stat_get(void *_offset, u64 *val)
A
Avi Kivity 已提交
2211 2212 2213 2214 2215 2216
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

2217
	*val = 0;
A
Avi Kivity 已提交
2218 2219 2220
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
R
Rusty Russell 已提交
2221 2222
			vcpu = kvm->vcpus[i];
			if (vcpu)
2223
				*val += *(u32 *)((void *)vcpu + offset);
A
Avi Kivity 已提交
2224 2225
		}
	spin_unlock(&kvm_lock);
2226
	return 0;
A
Avi Kivity 已提交
2227 2228
}

2229 2230 2231 2232 2233 2234
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
A
Avi Kivity 已提交
2235

2236
static void kvm_init_debug(void)
A
Avi Kivity 已提交
2237 2238 2239
{
	struct kvm_stats_debugfs_item *p;

2240
	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
A
Avi Kivity 已提交
2241
	for (p = debugfs_entries; p->name; ++p)
2242
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
A
Avi Kivity 已提交
2243
						(void *)(long)p->offset,
2244
						stat_fops[p->kind]);
A
Avi Kivity 已提交
2245 2246 2247 2248 2249 2250 2251 2252
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
2253
	debugfs_remove(kvm_debugfs_dir);
A
Avi Kivity 已提交
2254 2255
}

2256 2257
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
A
Avi Kivity 已提交
2258
	hardware_disable(NULL);
2259 2260 2261 2262 2263
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
A
Avi Kivity 已提交
2264
	hardware_enable(NULL);
2265 2266 2267 2268
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
2269
	.name = "kvm",
2270 2271 2272 2273 2274 2275 2276 2277 2278
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

2279
struct page *bad_page;
2280
pfn_t bad_pfn;
A
Avi Kivity 已提交
2281

2282 2283 2284 2285 2286 2287 2288 2289 2290 2291
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

2292
	kvm_arch_vcpu_load(vcpu, cpu);
2293 2294 2295 2296 2297 2298 2299
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

2300
	kvm_arch_vcpu_put(vcpu);
2301 2302
}

2303
int kvm_init(void *opaque, unsigned int vcpu_size,
2304
		  struct module *module)
A
Avi Kivity 已提交
2305 2306
{
	int r;
Y
Yang, Sheng 已提交
2307
	int cpu;
A
Avi Kivity 已提交
2308

2309 2310
	kvm_init_debug();

2311 2312
	r = kvm_arch_init(opaque);
	if (r)
2313
		goto out_fail;
2314 2315 2316 2317 2318 2319 2320 2321

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

2322 2323
	bad_pfn = page_to_pfn(bad_page);

2324 2325 2326 2327 2328
	if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

2329
	r = kvm_arch_hardware_setup();
A
Avi Kivity 已提交
2330
	if (r < 0)
2331
		goto out_free_0a;
A
Avi Kivity 已提交
2332

Y
Yang, Sheng 已提交
2333 2334
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
2335
				kvm_arch_check_processor_compat,
2336
				&r, 1);
Y
Yang, Sheng 已提交
2337
		if (r < 0)
2338
			goto out_free_1;
Y
Yang, Sheng 已提交
2339 2340
	}

2341
	on_each_cpu(hardware_enable, NULL, 1);
A
Avi Kivity 已提交
2342 2343
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
2344
		goto out_free_2;
A
Avi Kivity 已提交
2345 2346
	register_reboot_notifier(&kvm_reboot_notifier);

2347 2348
	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
2349
		goto out_free_3;
2350 2351 2352

	r = sysdev_register(&kvm_sysdev);
	if (r)
2353
		goto out_free_4;
2354

2355 2356
	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
J
Joe Perches 已提交
2357 2358
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
2359 2360
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
2361
		goto out_free_5;
2362 2363
	}

A
Avi Kivity 已提交
2364
	kvm_chardev_ops.owner = module;
2365 2366
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
A
Avi Kivity 已提交
2367 2368 2369

	r = misc_register(&kvm_dev);
	if (r) {
M
Mike Day 已提交
2370
		printk(KERN_ERR "kvm: misc device register failed\n");
A
Avi Kivity 已提交
2371 2372 2373
		goto out_free;
	}

2374 2375
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;
S
Sheng Yang 已提交
2376 2377 2378
#ifndef CONFIG_X86
	msi2intx = 0;
#endif
2379

2380
	return 0;
A
Avi Kivity 已提交
2381 2382

out_free:
2383
	kmem_cache_destroy(kvm_vcpu_cache);
2384
out_free_5:
2385
	sysdev_unregister(&kvm_sysdev);
2386
out_free_4:
2387
	sysdev_class_unregister(&kvm_sysdev_class);
2388
out_free_3:
A
Avi Kivity 已提交
2389
	unregister_reboot_notifier(&kvm_reboot_notifier);
A
Avi Kivity 已提交
2390
	unregister_cpu_notifier(&kvm_cpu_notifier);
2391
out_free_2:
2392
	on_each_cpu(hardware_disable, NULL, 1);
2393
out_free_1:
2394
	kvm_arch_hardware_unsetup();
2395 2396
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
2397 2398
out_free_0:
	__free_page(bad_page);
2399
out:
2400
	kvm_arch_exit();
2401
	kvm_exit_debug();
2402
out_fail:
A
Avi Kivity 已提交
2403 2404
	return r;
}
2405
EXPORT_SYMBOL_GPL(kvm_init);
A
Avi Kivity 已提交
2406

2407
void kvm_exit(void)
A
Avi Kivity 已提交
2408
{
2409
	kvm_trace_cleanup();
A
Avi Kivity 已提交
2410
	misc_deregister(&kvm_dev);
2411
	kmem_cache_destroy(kvm_vcpu_cache);
2412 2413
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
A
Avi Kivity 已提交
2414
	unregister_reboot_notifier(&kvm_reboot_notifier);
2415
	unregister_cpu_notifier(&kvm_cpu_notifier);
2416
	on_each_cpu(hardware_disable, NULL, 1);
2417
	kvm_arch_hardware_unsetup();
2418
	kvm_arch_exit();
A
Avi Kivity 已提交
2419
	kvm_exit_debug();
2420
	free_cpumask_var(cpus_hardware_enabled);
2421
	__free_page(bad_page);
A
Avi Kivity 已提交
2422
}
2423
EXPORT_SYMBOL_GPL(kvm_exit);