/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 * 		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static bool kvm_rebooting;

static bool largepages_enabled = true;

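/*
 * Heuristic: a pfn backed by a valid struct page that is not marked
 * reserved is ordinary RAM; anything without a struct page, or whose
 * (compound head) page is reserved, is treated as MMIO.
 */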
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

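/*
 * Set the given request bit on every vcpu, then IPI each CPU that is
 * currently running a vcpu which did not already have it set.  If the
 * cpumask allocation fails, all online CPUs are kicked as a conservative
 * fallback.  Returns true if an IPI was sent.
 */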
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

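/*
 * The notifiers below keep the shadow page tables coherent with the host
 * mm: mmu_notifier_seq is bumped whenever host ptes are invalidated or
 * changed, so that an in-flight kvm page fault can detect that it raced
 * with an invalidation, and mmu_notifier_count stays elevated across a
 * range invalidation to keep new sptes from being established meanwhile.
 * Both counters are only accessed under mmu_lock.
 */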
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0;
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto out_err;
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			goto out_err;
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);


	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
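/*
 * Illustrative userspace usage (not part of this file): a VMM typically
 * backs a slot with anonymous memory and registers it with the
 * KVM_SET_USER_MEMORY_REGION ioctl on the VM fd, which reaches this code
 * through kvm_vm_ioctl_set_memory_region().  Sizes and names below are
 * hypothetical:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = 0,            // or KVM_MEM_LOG_DIRTY_PAGES
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)ram,   // e.g. from mmap()
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */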
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

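/*
 * The guest accessors below work in per-page chunks: next_segment()
 * returns how many of 'len' bytes fit in the current page starting at
 * 'offset', and the callers advance gfn/offset until 'len' is consumed.
 */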
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

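/*
 * Layout of the vcpu mmap: page 0 is the kvm_run structure, followed on
 * x86 by the pio data page and, when configured, the coalesced mmio ring.
 */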
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
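/*
 * A 32-bit process on a 64-bit kernel passes a compat_uptr_t for the
 * dirty bitmap; convert it to a native pointer and reuse the regular
 * KVM_GET_DIRTY_LOG handler.
 */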
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

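/*
 * Virtualization extensions are enabled on demand: kvm_usage_count,
 * protected by kvm_lock, tracks live VMs.  The first VM enables VT/SVM
 * on every online CPU and the last VM to go away disables it again.
 */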
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}


asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT required VMX off for all cpu when system shutdown.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			     struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
1933 1934
}

A
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

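/*
 * debugfs accessors: sum a u32 counter located at 'offset' within each
 * struct kvm (or each of its vcpus) across all VMs on vm_list.
 */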
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

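/*
 * Module bring-up, in order: arch init, the shared bad_page, hardware
 * setup plus a processor compatibility check on every online CPU, the
 * cpu and reboot notifiers, the sysdev used for suspend/resume, the vcpu
 * slab cache, and finally the /dev/kvm misc device.  The error labels
 * below unwind in reverse order.
 */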
int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);