kvm_main.c 43.7 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
19
#include "x86.h"
A
Avi Kivity 已提交
20
#include "x86_emulate.h"
21
#include "irq.h"
A
Avi Kivity 已提交
22 23 24 25 26 27 28 29 30 31 32 33 34

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
35
#include <linux/sysdev.h>
A
Avi Kivity 已提交
36
#include <linux/cpu.h>
A
Alexey Dobriyan 已提交
37
#include <linux/sched.h>
38 39
#include <linux/cpumask.h>
#include <linux/smp.h>
40
#include <linux/anon_inodes.h>
41
#include <linux/profile.h>
42
#include <linux/kvm_para.h>
43
#include <linux/pagemap.h>
44
#include <linux/mman.h>
A
Avi Kivity 已提交
45

A
Avi Kivity 已提交
46 47 48 49 50
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
A
Avi Kivity 已提交
51 52 53 54

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

55 56 57
static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

58 59
static cpumask_t cpus_hardware_enabled;

60
struct kvm_x86_ops *kvm_x86_ops;
61 62
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
A
Avi Kivity 已提交
63

64 65
static __read_mostly struct preempt_ops kvm_preempt_ops;

A
Avi Kivity 已提交
66 67
static struct dentry *debugfs_dir;

A
Avi Kivity 已提交
68 69 70
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

71 72 73 74 75
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

A
Avi Kivity 已提交
76 77 78
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
79
void vcpu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
80
{
81 82
	int cpu;

A
Avi Kivity 已提交
83
	mutex_lock(&vcpu->mutex);
84 85
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
86
	kvm_arch_vcpu_load(vcpu, cpu);
87
	put_cpu();
A
Avi Kivity 已提交
88 89
}

90
void vcpu_put(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
91
{
92
	preempt_disable();
93
	kvm_arch_vcpu_put(vcpu);
94 95
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
A
Avi Kivity 已提交
96 97 98
	mutex_unlock(&vcpu->mutex);
}

/* No-op IPI callback; the IPI itself is the acknowledgement. */
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
105
	int i, cpu;
106 107 108 109
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
R
Rusty Russell 已提交
110 111 112 113
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
114
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
115 116 117
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
118
			cpu_set(cpu, cpus);
119
	}
120
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
121 122
}

R
Rusty Russell 已提交
123 124 125 126 127 128 129 130 131 132
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
133 134 135 136
	if (!irqchip_in_kernel(kvm) || id == 0)
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
E
Eddie Dong 已提交
137
	init_waitqueue_head(&vcpu->wq);
R
Rusty Russell 已提交
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

157 158 159 160 161 162
	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

R
Rusty Russell 已提交
163 164
	return 0;

165 166
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
R
Rusty Russell 已提交
167 168 169 170 171
fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
172
	return r;
R
Rusty Russell 已提交
173 174 175 176 177
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

/* Tear down everything kvm_vcpu_init() set up, in reverse order. */
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

185
static struct kvm *kvm_create_vm(void)
A
Avi Kivity 已提交
186 187 188 189
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
190
		return ERR_PTR(-ENOMEM);
A
Avi Kivity 已提交
191

192
	kvm_io_bus_init(&kvm->pio_bus);
S
Shaohua Li 已提交
193
	mutex_init(&kvm->lock);
A
Avi Kivity 已提交
194
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
195
	kvm_io_bus_init(&kvm->mmio_bus);
196 197 198
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
199 200 201
	return kvm;
}

A
Avi Kivity 已提交
202 203 204 205 206 207
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
208 209
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);
A
Avi Kivity 已提交
210 211 212 213 214

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
A
Al Viro 已提交
215
	free->dirty_bitmap = NULL;
216
	free->rmap = NULL;
A
Avi Kivity 已提交
217 218 219 220 221 222 223
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
A
Al Viro 已提交
224
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
A
Avi Kivity 已提交
225 226
}

/* Drop the vcpu's mmu roots; must run in vcpu context, hence load/put. */
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

A
Avi Kivity 已提交
234 235 236 237
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

A
Avi Kivity 已提交
238 239 240 241
	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
R
Rusty Russell 已提交
242 243 244 245
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
246
			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
R
Rusty Russell 已提交
247 248 249 250
			kvm->vcpus[i] = NULL;
		}
	}

A
Avi Kivity 已提交
251 252
}

253 254
static void kvm_destroy_vm(struct kvm *kvm)
{
255 256 257
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
258
	kvm_io_bus_destroy(&kvm->pio_bus);
259
	kvm_io_bus_destroy(&kvm->mmio_bus);
260
	kfree(kvm->vpic);
E
Eddie Dong 已提交
261
	kfree(kvm->vioapic);
A
Avi Kivity 已提交
262 263 264
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
265 266 267 268 269 270 271
}

/* fops release for the VM fd: the last reference destroys the VM. */
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
280 281
 *
 * Must be called holding kvm->lock.
A
Avi Kivity 已提交
282
 */
283 284 285
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
A
Avi Kivity 已提交
286 287 288 289 290 291 292 293 294 295 296 297 298 299
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
300
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
A
Avi Kivity 已提交
301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
321
		goto out_free;
A
Avi Kivity 已提交
322 323 324 325 326 327 328 329 330 331

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
332
			goto out_free;
A
Avi Kivity 已提交
333 334 335 336
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
A
Al Viro 已提交
337
		new.dirty_bitmap = NULL;
A
Avi Kivity 已提交
338 339 340 341

	r = -ENOMEM;

	/* Allocate if a slot is being created */
342
	if (npages && !new.rmap) {
M
Mike Day 已提交
343
		new.rmap = vmalloc(npages * sizeof(struct page *));
344 345

		if (!new.rmap)
346
			goto out_free;
347 348

		memset(new.rmap, 0, npages * sizeof(*new.rmap));
349

350
		new.user_alloc = user_alloc;
351
		if (user_alloc)
352
			new.userspace_addr = mem->userspace_addr;
353 354 355 356 357 358 359 360 361 362
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
363
				goto out_free;
A
Avi Kivity 已提交
364
		}
365 366 367 368 369 370 371 372 373 374 375 376 377
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
A
Avi Kivity 已提交
378 379 380 381 382 383 384 385
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
386
			goto out_free;
A
Avi Kivity 已提交
387 388 389 390 391 392
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410
	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
				        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

A
Avi Kivity 已提交
411 412
	*memslot = new;

413 414
	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);
A
Avi Kivity 已提交
415 416 417 418

	kvm_free_physmem_slot(&old, &new);
	return 0;

419
out_free:
A
Avi Kivity 已提交
420 421 422
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
423 424

}
425 426 427 428 429 430 431 432 433 434 435 436 437
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

/* Locked wrapper around __kvm_set_memory_region(). */
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

440 441 442 443
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
444
{
445 446
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
447
	return kvm_set_memory_region(kvm, mem, user_alloc);
A
Avi Kivity 已提交
448 449 450 451 452
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
453 454
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
A
Avi Kivity 已提交
455 456 457 458 459 460
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

S
Shaohua Li 已提交
461
	mutex_lock(&kvm->lock);
A
Avi Kivity 已提交
462 463 464 465 466 467 468 469 470 471

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

472
	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
A
Avi Kivity 已提交
473

474
	for (i = 0; !any && i < n/sizeof(long); ++i)
A
Avi Kivity 已提交
475 476 477 478 479 480
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

481 482 483 484 485 486
	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}
A
Avi Kivity 已提交
487 488 489 490

	r = 0;

out:
S
Shaohua Li 已提交
491
	mutex_unlock(&kvm->lock);
A
Avi Kivity 已提交
492 493 494
	return r;
}

495 496 497 498 499 500
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

501
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
502 503 504 505 506 507 508 509 510 511 512 513 514 515
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/* Find the slot containing an already-unaliased @gfn, or NULL. */
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
528 529 530 531 532 533

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}
A
Avi Kivity 已提交
534

535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

551 552 553 554
/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
A
Avi Kivity 已提交
555 556
{
	struct kvm_memory_slot *slot;
557 558
	struct page *page[1];
	int npages;
A
Avi Kivity 已提交
559

560 561
	might_sleep();

562 563
	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
564 565
	if (!slot) {
		get_page(bad_page);
566
		return bad_page;
567
	}
568 569 570 571 572 573 574 575

	npages = get_user_pages(current, current->mm,
				slot->userspace_addr
				+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
				1, 1, page, NULL);
	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
576
	}
577 578

	return page[0];
A
Avi Kivity 已提交
579
}
580 581 582 583 584 585 586 587 588 589 590 591

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}

A
Avi Kivity 已提交
592 593
EXPORT_SYMBOL_GPL(gfn_to_page);

/* Drop a reference from gfn_to_page(), marking the page dirty. */
void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

/* Copy @len bytes from guest page @gfn at @offset into @data.
 * Returns 0 or -EFAULT when the gfn cannot be resolved. */
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(data, page_virt + offset, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

/* Read @len bytes starting at guest physical address @gpa, page by page. */
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

/* Copy @len bytes from @data into guest page @gfn at @offset and mark
 * the gfn dirty.  Returns 0 or -EFAULT. */
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(page_virt + offset, data, len);

	kunmap_atomic(page_virt, KM_USER0);
	mark_page_dirty(kvm, gfn);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

/* Write @len bytes to guest physical address @gpa, page by page. */
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

/* Zero @len bytes of guest page @gfn at @offset.  Returns 0 or -EFAULT. */
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

/* Zero @len bytes starting at guest physical address @gpa. */
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

A
Avi Kivity 已提交
732 733
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
734
	struct kvm_memory_slot *memslot;
A
Avi Kivity 已提交
735

736
	gfn = unalias_gfn(kvm, gfn);
R
Rusty Russell 已提交
737 738 739
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
A
Avi Kivity 已提交
740

R
Rusty Russell 已提交
741 742 743
		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
A
Avi Kivity 已提交
744 745 746
	}
}

E
Eddie Dong 已提交
747 748 749
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
750
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
751
{
E
Eddie Dong 已提交
752 753 754 755 756 757 758
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
759 760 761 762
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
E
Eddie Dong 已提交
763 764 765 766 767
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}
768

769
	__set_current_state(TASK_RUNNING);
E
Eddie Dong 已提交
770 771 772
	remove_wait_queue(&vcpu->wq, &wait);
}

/* Voluntarily yield the cpu if a reschedule is pending. */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					  struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

/* Publish vcpu interrupt/flag state into kvm_run before returning to
 * userspace. */
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->interrupt_window_open &&
					 vcpu->irq_summary == 0);
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
M
Mike Day 已提交
815
		pr_debug("vcpu %d received sipi with vector # %x\n",
816 817
		       vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
818 819 820
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
821 822 823 824 825 826 827 828 829 830 831 832
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

833 834
	kvm_inject_pending_timer_irqs(vcpu);

835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856
	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
857
	kvm_guest_enter();
858 859

	if (vcpu->requests)
860
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
861 862 863 864 865 866 867 868 869
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

870 871 872 873 874 875 876 877 878 879
	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916
	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}


A
Avi Kivity 已提交
917
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
A
Avi Kivity 已提交
918 919
{
	int r;
A
Avi Kivity 已提交
920
	sigset_t sigsaved;
A
Avi Kivity 已提交
921

A
Avi Kivity 已提交
922
	vcpu_load(vcpu);
A
Avi Kivity 已提交
923

924 925 926 927 928 929
	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

A
Avi Kivity 已提交
930 931 932
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

933
	/* re-sync apic's tpr */
934 935
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);
936

937 938 939 940 941
	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
942
#if CONFIG_HAS_IOMEM
943 944 945 946 947
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
948
					vcpu->mmio_fault_cr2, 0, 1);
949 950 951 952 953 954
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
955
		}
A
Avi Kivity 已提交
956
	}
957
#endif
958
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
959
		kvm_x86_ops->cache_regs(vcpu);
960
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
961
		kvm_x86_ops->decache_regs(vcpu);
962 963
	}

964
	r = __vcpu_run(vcpu, kvm_run);
A
Avi Kivity 已提交
965

966
out:
A
Avi Kivity 已提交
967 968 969
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

A
Avi Kivity 已提交
970 971 972 973
	vcpu_put(vcpu);
	return r;
}

A
Avi Kivity 已提交
974 975
static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
A
Avi Kivity 已提交
976
{
A
Avi Kivity 已提交
977
	vcpu_load(vcpu);
A
Avi Kivity 已提交
978

979
	kvm_x86_ops->cache_regs(vcpu);
A
Avi Kivity 已提交
980 981 982 983 984 985 986 987 988

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
989
#ifdef CONFIG_X86_64
A
Avi Kivity 已提交
990 991 992 993 994 995 996 997 998 999 1000
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
1001
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
A
Avi Kivity 已提交
1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

A
Avi Kivity 已提交
1014 1015
static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
A
Avi Kivity 已提交
1016
{
A
Avi Kivity 已提交
1017
	vcpu_load(vcpu);
A
Avi Kivity 已提交
1018 1019 1020 1021 1022 1023 1024 1025 1026

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
1027
#ifdef CONFIG_X86_64
A
Avi Kivity 已提交
1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
1039
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
A
Avi Kivity 已提交
1040

1041
	kvm_x86_ops->decache_regs(vcpu);
A
Avi Kivity 已提交
1042 1043 1044 1045 1046 1047 1048 1049 1050

	vcpu_put(vcpu);

	return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
1051
	return kvm_x86_ops->get_segment(vcpu, var, seg);
A
Avi Kivity 已提交
1052 1053
}

A
Avi Kivity 已提交
1054 1055
static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
A
Avi Kivity 已提交
1056 1057
{
	struct descriptor_table dt;
E
Eddie Dong 已提交
1058
	int pending_vec;
A
Avi Kivity 已提交
1059

A
Avi Kivity 已提交
1060
	vcpu_load(vcpu);
A
Avi Kivity 已提交
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

1072
	kvm_x86_ops->get_idt(vcpu, &dt);
A
Avi Kivity 已提交
1073 1074
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
1075
	kvm_x86_ops->get_gdt(vcpu, &dt);
A
Avi Kivity 已提交
1076 1077 1078
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

1079
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
A
Avi Kivity 已提交
1080 1081 1082 1083
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
1084
	sregs->cr8 = get_cr8(vcpu);
A
Avi Kivity 已提交
1085
	sregs->efer = vcpu->shadow_efer;
1086
	sregs->apic_base = kvm_get_apic_base(vcpu);
A
Avi Kivity 已提交
1087

E
Eddie Dong 已提交
1088
	if (irqchip_in_kernel(vcpu->kvm)) {
1089 1090
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
1091
		pending_vec = kvm_x86_ops->get_irq(vcpu);
E
Eddie Dong 已提交
1092
		if (pending_vec >= 0)
M
Mike Day 已提交
1093 1094
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
E
Eddie Dong 已提交
1095
	} else
1096 1097
		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
		       sizeof sregs->interrupt_bitmap);
A
Avi Kivity 已提交
1098 1099 1100 1101 1102 1103 1104 1105 1106

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
1107
	return kvm_x86_ops->set_segment(vcpu, var, seg);
A
Avi Kivity 已提交
1108 1109
}

A
Avi Kivity 已提交
1110 1111
/*
 * Install a complete set of special registers supplied by userspace
 * (KVM_SET_SREGS): descriptor tables, control registers, EFER, APIC base,
 * segment registers and the pending-interrupt state.
 *
 * The shadow MMU is reset whenever cr0, cr3, cr4 or efer change, since any
 * of those can alter the paging mode the shadow page tables were built for.
 * Statement order matters: control registers must be written before the
 * PDPTR reload and the MMU reset below.
 */
static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	/* IDT and GDT limits/bases go straight to the arch backend. */
	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
	kvm_set_apic_base(vcpu, sregs->apic_base);

	/* Resync cached cr4 bits before comparing against the new values. */
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	vcpu->cr0 = sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	/* 32-bit PAE mode caches the PDPTRs; reload them from cr3. */
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		/* Userspace irqchip: take the whole pending bitmap and
		 * rebuild the per-word summary. */
		memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->irq_pending);
		vcpu->irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
			if (vcpu->irq_pending[i])
				__set_bit(i, &vcpu->irq_summary);
	} else {
		/* In-kernel irqchip: at most one vector can be re-queued. */
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n",
				 pending_vec);
		}
	}

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}

1187 1188 1189 1190 1191 1192 1193 1194 1195 1196
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

A
Avi Kivity 已提交
1197 1198 1199
/*
 * Translate a guest virtual address to a guest physical address.
 * Walks the shadow MMU under the vm lock; 'writeable' and 'usermode'
 * are not derived from the walk and are reported as fixed values.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	mutex_lock(&vcpu->kvm->lock);	/* gva_to_gpa needs the vm lock */
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;	/* always reported writeable */
	tr->usermode = 0;	/* always reported supervisor */
	mutex_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}

A
Avi Kivity 已提交
1219 1220
/*
 * Queue an external interrupt injected by userspace (KVM_INTERRUPT).
 * Only valid with the userspace irqchip model; with an in-kernel irqchip
 * injection goes through the irqchip instead and this returns -ENXIO.
 */
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)	/* x86 vectors are 0..255 */
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	/* irq_summary flags which word of irq_pending holds a set bit. */
	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

A
Avi Kivity 已提交
1236 1237
static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
A
Avi Kivity 已提交
1238 1239 1240
{
	int r;

A
Avi Kivity 已提交
1241
	vcpu_load(vcpu);
A
Avi Kivity 已提交
1242

1243
	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
A
Avi Kivity 已提交
1244 1245 1246 1247 1248 1249

	vcpu_put(vcpu);

	return r;
}

1250 1251 1252 1253 1254 1255 1256 1257 1258
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1259 1260 1261 1262 1263
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
1264 1265
		return NOPAGE_SIGBUS;
	get_page(page);
1266 1267 1268
	if (type != NULL)
		*type = VM_FAULT_MINOR;

1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281
	return page;
}

/* vcpu mmaps fault their pages in on demand via kvm_vcpu_nopage. */
static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

A
Avi Kivity 已提交
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293
/*
 * Close of a vcpu fd: drop the reference on the owning VM's file that
 * create_vcpu_fd() took, so the VM can be torn down once unused.
 */
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

/* File operations for vcpu fds; the same ioctl handler serves compat. */
static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 * Also pins the owning VM's struct file so the VM cannot go away while
 * a vcpu fd is open; the reference is dropped in kvm_vcpu_release().
 * Returns the new fd, or a negative errno.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

1314 1315 1316 1317 1318 1319 1320 1321 1322
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 *
 * Allocates and resets vcpu 'n', publishes it in kvm->vcpus[] under the
 * vm lock, and finally hands userspace an fd for it.  Error unwinding
 * runs the construction steps in reverse (unlink -> mmu unload -> free).
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_x86_ops->vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

	vcpu_load(vcpu);
	r = kvm_x86_ops->vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	/* Publish the vcpu; racing creators of the same slot get -EEXIST. */
	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto mmu_unload;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);

mmu_unload:
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

A
Avi Kivity 已提交
1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382
/*
 * Set (sigset != NULL) or clear (sigset == NULL) the signal mask applied
 * while the vcpu executes guest code.  SIGKILL and SIGSTOP can never be
 * masked.
 */
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

A
Avi Kivity 已提交
1383 1384
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
A
Avi Kivity 已提交
1385
{
A
Avi Kivity 已提交
1386
	struct kvm_vcpu *vcpu = filp->private_data;
A
Al Viro 已提交
1387
	void __user *argp = (void __user *)arg;
1388
	int r;
A
Avi Kivity 已提交
1389 1390

	switch (ioctl) {
1391
	case KVM_RUN:
1392 1393 1394
		r = -EINVAL;
		if (arg)
			goto out;
1395
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
A
Avi Kivity 已提交
1396 1397 1398 1399
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

A
Avi Kivity 已提交
1400 1401
		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
A
Avi Kivity 已提交
1402 1403 1404
		if (r)
			goto out;
		r = -EFAULT;
A
Al Viro 已提交
1405
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
A
Avi Kivity 已提交
1406 1407 1408 1409 1410 1411 1412 1413
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
A
Al Viro 已提交
1414
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
A
Avi Kivity 已提交
1415
			goto out;
A
Avi Kivity 已提交
1416
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
A
Avi Kivity 已提交
1417 1418 1419 1420 1421 1422 1423 1424
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

A
Avi Kivity 已提交
1425 1426
		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
A
Avi Kivity 已提交
1427 1428 1429
		if (r)
			goto out;
		r = -EFAULT;
A
Al Viro 已提交
1430
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
A
Avi Kivity 已提交
1431 1432 1433 1434 1435 1436 1437 1438
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
A
Al Viro 已提交
1439
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
A
Avi Kivity 已提交
1440
			goto out;
A
Avi Kivity 已提交
1441
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
A
Avi Kivity 已提交
1442 1443 1444 1445 1446 1447 1448 1449 1450
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
A
Al Viro 已提交
1451
		if (copy_from_user(&tr, argp, sizeof tr))
A
Avi Kivity 已提交
1452
			goto out;
A
Avi Kivity 已提交
1453
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
A
Avi Kivity 已提交
1454 1455 1456
		if (r)
			goto out;
		r = -EFAULT;
A
Al Viro 已提交
1457
		if (copy_to_user(argp, &tr, sizeof tr))
A
Avi Kivity 已提交
1458 1459 1460 1461 1462 1463 1464 1465
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
A
Al Viro 已提交
1466
		if (copy_from_user(&irq, argp, sizeof irq))
A
Avi Kivity 已提交
1467
			goto out;
A
Avi Kivity 已提交
1468
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
A
Avi Kivity 已提交
1469 1470 1471 1472 1473 1474 1475 1476 1477
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
A
Al Viro 已提交
1478
		if (copy_from_user(&dbg, argp, sizeof dbg))
A
Avi Kivity 已提交
1479
			goto out;
A
Avi Kivity 已提交
1480
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
A
Avi Kivity 已提交
1481 1482 1483 1484 1485
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
A
Avi Kivity 已提交
1509 1510 1511 1512
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
1513
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
A
Avi Kivity 已提交
1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
1528
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
A
Avi Kivity 已提交
1529 1530 1531 1532 1533
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1534
	default:
1535
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
1536 1537 1538 1539 1540 1541 1542 1543 1544 1545
	}
out:
	return r;
}

/*
 * ioctl dispatcher for VM fds: vcpu creation, memory-slot setup and the
 * dirty log.  Everything else goes to the arch-specific handler.
 */
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);	/* arg is the vcpu id */
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		/* final '1' = region is backed by userspace memory */
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

/*
 * Fault handler for a VM fd mmap: the file offset (in pages) is a guest
 * frame number; hand back the corresponding host page, or SIGBUS when the
 * gfn is outside any visible memslot or maps to the error page.
 */
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);	/* drop the ref taken on the error page */
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

/* VM fd mmaps fault guest frames in on demand via kvm_vm_nopage. */
static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

/* File operations for VM fds; same ioctl handler serves compat. */
static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

/*
 * KVM_CREATE_VM: allocate a VM and wrap it in an anonymous-inode fd.
 * On anon-inode failure the half-built VM is destroyed.  The struct file
 * is remembered in kvm->filp so vcpu fds can pin it later.
 * Returns the new fd, or a negative errno.
 */
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

/*
 * ioctl dispatcher for /dev/kvm itself: API version, VM creation,
 * capability queries and the vcpu mmap size.  Unknown ioctls go to the
 * arch handler (note: that path returns directly, bypassing 'out').
 */
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)	/* no argument expected */
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)	/* no argument expected */
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION: {
		/* The capability id is passed in the argument itself. */
		int ext = (long)argp;

		switch (ext) {
		case KVM_CAP_IRQCHIP:
		case KVM_CAP_HLT:
		case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
		case KVM_CAP_USER_MEMORY:
		case KVM_CAP_SET_TSS_ADDR:
			r = 1;
			break;
		default:
			r = 0;
			break;
		}
		break;
	}
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)	/* no argument expected */
			goto out;
		r = 2 * PAGE_SIZE;	/* kvm_run page + pio_data page */
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

/* /dev/kvm file operations; same ioctl handler serves compat. */
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
A
Avi Kivity 已提交
1701
	KVM_MINOR,
A
Avi Kivity 已提交
1702 1703 1704 1705
	"kvm",
	&kvm_chardev_ops,
};

A
Avi Kivity 已提交
1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 *
 * Walks every vcpu of every VM under kvm_lock; vcpus whose mutex cannot be
 * taken are running elsewhere and therefore cannot be cached on 'cpu'.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;	/* force reload on next run */
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

1741 1742 1743 1744 1745 1746 1747
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
1748
	kvm_x86_ops->hardware_enable(NULL);
1749 1750 1751 1752 1753 1754 1755 1756 1757 1758
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
1759
	kvm_x86_ops->hardware_disable(NULL);
1760 1761
}

A
Avi Kivity 已提交
1762 1763 1764 1765 1766 1767
/*
 * CPU hotplug notifier: keep the set of VT-enabled cpus in sync with the
 * online cpu set.  DYING runs on the dying cpu itself, so hardware_disable
 * is called directly; the other events run elsewhere and use an IPI.
 */
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/* we run here on the cpu that is going down */
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

1790
/* Reboot notifier: leave VMX root mode everywhere before a restart. */
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845
/* Initialize an empty bus of in-kernel I/O devices. */
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

/* Run each registered device's destructor; the bus itself is not freed. */
void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* Linear scan for the first device that claims 'addr'; NULL if none. */
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

/* Append a device; capacity is fixed at NR_IOBUS_DEVS slots. */
void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

A
Avi Kivity 已提交
1846 1847 1848 1849 1850
/* Hotplug notifier registration block for kvm_cpu_hotplug(). */
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

A
Avi Kivity 已提交
1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
R
Rusty Russell 已提交
1862 1863 1864
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
A
Avi Kivity 已提交
1865 1866 1867 1868 1869
		}
	spin_unlock(&kvm_lock);
	return total;
}

R
Rusty Russell 已提交
1870
/* Read-only debugfs attribute: each file reports one summed counter. */
DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");

/* Create /sys/kernel/debug/kvm and one file per entry in debugfs_entries. */
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

/* Tear down the per-stat debugfs files, then the kvm directory itself. */
static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *item = debugfs_entries;

	while (item->name) {
		debugfs_remove(item->dentry);
		++item;
	}
	debugfs_remove(debugfs_dir);
}

1892 1893
/* Power management: turn VT off on this cpu before system suspend... */
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

/* ...and back on at resume. */
static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

/* sysdev registration wires the suspend/resume hooks into PM transitions. */
static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

1915
/* Zeroed fallback page; allocated in kvm_init(), freed in kvm_exit(). */
struct page *bad_page;
A
Avi Kivity 已提交
1916

1917 1918 1919 1920 1921 1922 1923 1924 1925 1926
/* Recover the vcpu that embeds the given preempt notifier. */
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

/* Scheduler hook: reload arch vcpu state when our task regains a cpu. */
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

/* Scheduler hook: save arch vcpu state when our task is preempted. */
static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_put(vcpu);
}

1938
/*
 * Register an arch backend (vmx or svm) and bring KVM up:
 * probe hardware support, enable VT on every online cpu, hook cpu
 * hotplug / reboot / suspend, create the vcpu slab, and finally expose
 * /dev/kvm.  Exactly one backend may be loaded at a time.
 *
 * The goto ladder unwinds the construction steps in strict reverse order;
 * keep new steps and labels paired.
 */
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_x86_ops = ops;

	r = kvm_x86_ops->hardware_setup();
	if (r < 0)
		goto out;

	/* All online cpus must be compatible with the chosen backend. */
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_x86_ops->check_processor_compatibility,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;	/* pin the backend while /dev/kvm is open */

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_x86_ops->hardware_unsetup();
out:
	kvm_x86_ops = NULL;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_x86);
A
Avi Kivity 已提交
2027

2028
/*
 * Unregister the arch backend: exact reverse of the successful
 * kvm_init_x86() sequence, ending with VT disabled on every cpu.
 */
void kvm_exit_x86(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_x86_ops->hardware_unsetup();
	kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
A
Avi Kivity 已提交
2041 2042 2043

/*
 * Module init: set up the MMU module, debugfs stats, arch hooks and the
 * shared zeroed 'bad_page'.  The backend itself registers later via
 * kvm_init_x86() when vmx/svm loads.
 */
static __init int kvm_init(void)
{
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_arch_init();

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}

/* Module exit: undo kvm_init() in reverse order. */
static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(bad_page);
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)