kvm_main.c 56.3 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
N
Nicolas Kaiser 已提交
8
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
9 10 11 12 13 14 15 16 17 18
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

19
#include "iodev.h"
A
Avi Kivity 已提交
20

21
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
22 23 24 25 26 27 28 29 30 31 32
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
33
#include <linux/sysdev.h>
A
Avi Kivity 已提交
34
#include <linux/cpu.h>
A
Alexey Dobriyan 已提交
35
#include <linux/sched.h>
36 37
#include <linux/cpumask.h>
#include <linux/smp.h>
38
#include <linux/anon_inodes.h>
39
#include <linux/profile.h>
40
#include <linux/kvm_para.h>
41
#include <linux/pagemap.h>
42
#include <linux/mman.h>
43
#include <linux/swap.h>
44
#include <linux/bitops.h>
45
#include <linux/spinlock.h>
46
#include <linux/compat.h>
47
#include <linux/srcu.h>
J
Joerg Roedel 已提交
48
#include <linux/hugetlb.h>
49
#include <linux/slab.h>
A
Avi Kivity 已提交
50

A
Avi Kivity 已提交
51 52 53
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
54
#include <asm/pgtable.h>
55
#include <asm-generic/bitops/le.h>
A
Avi Kivity 已提交
56

57
#include "coalesced_mmio.h"
58
#include "async_pf.h"
59

60 61 62
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

A
Avi Kivity 已提交
63 64 65
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

66 67 68
/*
 * Ordering of locks:
 *
69
 * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
70 71
 */

72 73
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
74

75
static cpumask_var_t cpus_hardware_enabled;
76 77
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;
78

79 80
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
A
Avi Kivity 已提交
81

82 83
static __read_mostly struct preempt_ops kvm_preempt_ops;

84
struct dentry *kvm_debugfs_dir;
A
Avi Kivity 已提交
85

A
Avi Kivity 已提交
86 87
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
88 89
static int hardware_enable_all(void);
static void hardware_disable_all(void);
A
Avi Kivity 已提交
90

M
Marcelo Tosatti 已提交
91 92
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

93 94
bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
95

96 97
static bool largepages_enabled = true;

98 99
static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;
100

101 102 103
static struct page *fault_page;
static pfn_t fault_pfn;

104
inline int kvm_is_mmio_pfn(pfn_t pfn)
B
Ben-Ami Yassour 已提交
105
{
106
	if (pfn_valid(pfn)) {
107
		int reserved;
108
		struct page *tail = pfn_to_page(pfn);
109 110
		struct page *head = compound_trans_head(tail);
		reserved = PageReserved(head);
111 112
		if (head != tail) {
			/*
113 114 115 116 117 118 119 120
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been splitted
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
121
			 */
122 123 124
			smp_rmb();
			if (PageTail(tail))
				return reserved;
125 126
		}
		return PageReserved(tail);
127
	}
B
Ben-Ami Yassour 已提交
128 129 130 131

	return true;
}

A
Avi Kivity 已提交
132 133 134
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
135
void vcpu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
136
{
137 138
	int cpu;

A
Avi Kivity 已提交
139
	mutex_lock(&vcpu->mutex);
140 141
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
142
	kvm_arch_vcpu_load(vcpu, cpu);
143
	put_cpu();
A
Avi Kivity 已提交
144 145
}

146
void vcpu_put(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
147
{
148
	preempt_disable();
149
	kvm_arch_vcpu_put(vcpu);
150 151
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
A
Avi Kivity 已提交
152 153 154
	mutex_unlock(&vcpu->mutex);
}

155 156 157 158
static void ack_flush(void *_completed)
{
}

159
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
160
{
161
	int i, cpu, me;
162 163
	cpumask_var_t cpus;
	bool called = true;
164 165
	struct kvm_vcpu *vcpu;

166
	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
167

168
	raw_spin_lock(&kvm->requests_lock);
169
	me = smp_processor_id();
170
	kvm_for_each_vcpu(i, vcpu, kvm) {
171
		if (kvm_make_check_request(req, vcpu))
172 173
			continue;
		cpu = vcpu->cpu;
174 175
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
176
	}
177 178 179 180 181 182
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
183
	raw_spin_unlock(&kvm->requests_lock);
184
	free_cpumask_var(cpus);
185
	return called;
186 187
}

188
void kvm_flush_remote_tlbs(struct kvm *kvm)
189
{
190 191 192
	int dirty_count = kvm->tlbs_dirty;

	smp_mb();
193 194
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
195
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
196 197
}

198 199 200 201
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
202

R
Rusty Russell 已提交
203 204 205 206 207 208 209 210 211
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
E
Eddie Dong 已提交
212
	init_waitqueue_head(&vcpu->wq);
213
	kvm_async_pf_vcpu_init(vcpu);
R
Rusty Russell 已提交
214 215 216 217 218 219 220 221

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

222
	r = kvm_arch_vcpu_init(vcpu);
R
Rusty Russell 已提交
223
	if (r < 0)
224
		goto fail_free_run;
R
Rusty Russell 已提交
225 226 227 228 229
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
230
	return r;
R
Rusty Russell 已提交
231 232 233 234 235
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
236
	kvm_arch_vcpu_uninit(vcpu);
R
Rusty Russell 已提交
237 238 239 240
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

241 242 243 244 245 246 247 248 249 250 251
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
252
	int need_tlb_flush, idx;
253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only need to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
272
	idx = srcu_read_lock(&kvm->srcu);
273 274
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
275
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
276
	spin_unlock(&kvm->mmu_lock);
277
	srcu_read_unlock(&kvm->srcu, idx);
278 279 280 281 282 283 284

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

285 286 287 288 289 290
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
291
	int idx;
292

293
	idx = srcu_read_lock(&kvm->srcu);
294 295 296 297
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
298
	srcu_read_unlock(&kvm->srcu, idx);
299 300
}

301 302 303 304 305 306
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
307
	int need_tlb_flush = 0, idx;
308

309
	idx = srcu_read_lock(&kvm->srcu);
310 311 312 313 314 315 316 317 318
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
319
	need_tlb_flush |= kvm->tlbs_dirty;
320
	spin_unlock(&kvm->mmu_lock);
321
	srcu_read_unlock(&kvm->srcu, idx);
322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smb_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
359
	int young, idx;
360

361
	idx = srcu_read_lock(&kvm->srcu);
362 363 364
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
365
	srcu_read_unlock(&kvm->srcu, idx);
366 367 368 369 370 371 372

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

A
Andrea Arcangeli 已提交
373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

389 390 391 392
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
393 394 395
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
396
	kvm_arch_flush_shadow(kvm);
397
	srcu_read_unlock(&kvm->srcu, idx);
398 399
}

400 401 402 403 404
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
A
Andrea Arcangeli 已提交
405
	.test_young		= kvm_mmu_notifier_test_young,
406
	.change_pte		= kvm_mmu_notifier_change_pte,
407
	.release		= kvm_mmu_notifier_release,
408
};
409 410 411 412 413 414 415 416 417 418 419 420 421 422

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

423 424
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

425
static struct kvm *kvm_create_vm(void)
A
Avi Kivity 已提交
426
{
427 428
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();
A
Avi Kivity 已提交
429

430 431 432 433 434 435
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm);
	if (r)
		goto out_err_nodisable;
436 437 438 439 440

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

441 442
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
443
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
444
#endif
A
Avi Kivity 已提交
445

446 447 448
	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
449
		goto out_err_nosrcu;
450
	if (init_srcu_struct(&kvm->srcu))
451
		goto out_err_nosrcu;
M
Marcelo Tosatti 已提交
452 453 454
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
455
		if (!kvm->buses[i])
M
Marcelo Tosatti 已提交
456 457
			goto out_err;
	}
458

459
	r = kvm_init_mmu_notifier(kvm);
460
	if (r)
461
		goto out_err;
462

463 464
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
465
	spin_lock_init(&kvm->mmu_lock);
466
	raw_spin_lock_init(&kvm->requests_lock);
G
Gregory Haskins 已提交
467
	kvm_eventfd_init(kvm);
S
Shaohua Li 已提交
468
	mutex_init(&kvm->lock);
469
	mutex_init(&kvm->irq_lock);
470
	mutex_init(&kvm->slots_lock);
I
Izik Eidus 已提交
471
	atomic_set(&kvm->users_count, 1);
472 473 474
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
475

476
	return kvm;
477 478

out_err:
479 480
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
481 482
	hardware_disable_all();
out_err_nodisable:
M
Marcelo Tosatti 已提交
483 484
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
485
	kfree(kvm->memslots);
486
	kvm_arch_free_vm(kvm);
487
	return ERR_PTR(r);
488 489
}

490 491 492 493 494
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

495 496 497 498 499
	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
		vfree(memslot->dirty_bitmap_head);
	else
		kfree(memslot->dirty_bitmap_head);

500
	memslot->dirty_bitmap = NULL;
501
	memslot->dirty_bitmap_head = NULL;
502 503
}

A
Avi Kivity 已提交
504 505 506 507 508 509
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
510 511
	int i;

512 513
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);
A
Avi Kivity 已提交
514 515

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
516
		kvm_destroy_dirty_bitmap(free);
A
Avi Kivity 已提交
517

518 519 520 521 522 523 524

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}
M
Marcelo Tosatti 已提交
525

A
Avi Kivity 已提交
526
	free->npages = 0;
527
	free->rmap = NULL;
A
Avi Kivity 已提交
528 529
}

530
void kvm_free_physmem(struct kvm *kvm)
A
Avi Kivity 已提交
531 532
{
	int i;
533 534 535 536
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);
A
Avi Kivity 已提交
537

538
	kfree(kvm->memslots);
A
Avi Kivity 已提交
539 540
}

541 542
static void kvm_destroy_vm(struct kvm *kvm)
{
M
Marcelo Tosatti 已提交
543
	int i;
544 545
	struct mm_struct *mm = kvm->mm;

546
	kvm_arch_sync_events(kvm);
547 548 549
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
550
	kvm_free_irq_routing(kvm);
M
Marcelo Tosatti 已提交
551 552
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
553
	kvm_coalesced_mmio_free(kvm);
554 555
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
556 557
#else
	kvm_arch_flush_shadow(kvm);
558
#endif
559
	kvm_arch_destroy_vm(kvm);
560 561 562
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
563
	hardware_disable_all();
564
	mmdrop(mm);
565 566
}

I
Izik Eidus 已提交
567 568 569 570 571 572 573 574 575 576 577 578 579 580
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


581 582 583 584
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

G
Gregory Haskins 已提交
585 586
	kvm_irqfd_release(kvm);

I
Izik Eidus 已提交
587
	kvm_put_kvm(kvm);
A
Avi Kivity 已提交
588 589 590
	return 0;
}

591 592 593 594 595
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * This makes it possible to do double buffering: see x86's
 * kvm_vm_ioctl_get_dirty_log().
 */
596 597
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
598
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
599

600 601 602 603 604
	if (dirty_bytes > PAGE_SIZE)
		memslot->dirty_bitmap = vzalloc(dirty_bytes);
	else
		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);

605 606 607
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

608
	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
609 610 611
	return 0;
}

A
Avi Kivity 已提交
612 613 614 615 616
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
617
 *
618
 * Must be called holding mmap_sem for write.
A
Avi Kivity 已提交
619
 */
620 621 622
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
A
Avi Kivity 已提交
623
{
624
	int r;
A
Avi Kivity 已提交
625
	gfn_t base_gfn;
626 627
	unsigned long npages;
	unsigned long i;
A
Avi Kivity 已提交
628 629
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
630
	struct kvm_memslots *slots, *old_memslots;
A
Avi Kivity 已提交
631 632 633 634 635 636 637

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
S
Sheng Yang 已提交
638
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
639
		goto out;
640
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
A
Avi Kivity 已提交
641 642 643 644
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

645
	memslot = &kvm->memslots->memslots[mem->slot];
A
Avi Kivity 已提交
646 647 648
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

649 650 651 652
	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

A
Avi Kivity 已提交
653 654 655 656 657
	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

658
	new.id = mem->slot;
A
Avi Kivity 已提交
659 660 661 662 663 664 665
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
666
		goto out_free;
A
Avi Kivity 已提交
667 668 669 670

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
671
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
A
Avi Kivity 已提交
672

673
		if (s == memslot || !s->npages)
A
Avi Kivity 已提交
674 675 676
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
677
			goto out_free;
A
Avi Kivity 已提交
678 679 680 681
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
A
Al Viro 已提交
682
		new.dirty_bitmap = NULL;
A
Avi Kivity 已提交
683 684 685 686

	r = -ENOMEM;

	/* Allocate if a slot is being created */
687
#ifndef CONFIG_S390
688
	if (npages && !new.rmap) {
689
		new.rmap = vzalloc(npages * sizeof(*new.rmap));
690 691

		if (!new.rmap)
692
			goto out_free;
693

694
		new.user_alloc = user_alloc;
695
		new.userspace_addr = mem->userspace_addr;
A
Avi Kivity 已提交
696
	}
697 698
	if (!npages)
		goto skip_lpage;
M
Marcelo Tosatti 已提交
699

700
	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
701 702 703
		unsigned long ugfn;
		unsigned long j;
		int lpages;
704
		int level = i + 2;
M
Marcelo Tosatti 已提交
705

706 707 708 709 710 711
		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

712 713 714
		lpages = 1 + ((base_gfn + npages - 1)
			     >> KVM_HPAGE_GFN_SHIFT(level));
		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
715

716
		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
717 718

		if (!new.lpage_info[i])
M
Marcelo Tosatti 已提交
719 720
			goto out_free;

721
		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
722
			new.lpage_info[i][0].write_count = 1;
723
		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
724
			new.lpage_info[i][lpages - 1].write_count = 1;
725 726 727
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
728 729
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
730
		 */
731
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
732
		    !largepages_enabled)
733 734
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
M
Marcelo Tosatti 已提交
735
	}
A
Avi Kivity 已提交
736

737 738
skip_lpage:

A
Avi Kivity 已提交
739 740
	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
741
		if (kvm_create_dirty_bitmap(&new) < 0)
742
			goto out_free;
743
		/* destroy any largepage mappings for dirty tracking */
A
Avi Kivity 已提交
744
	}
745 746 747 748
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
749
#endif /* not defined CONFIG_S390 */
A
Avi Kivity 已提交
750

751 752 753 754 755 756 757 758
	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
759
		slots->generation++;
760 761 762 763 764 765 766 767 768 769 770 771
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * 	- kvm_is_visible_gfn (mmu_check_roots)
		 */
772
		kvm_arch_flush_shadow(kvm);
773 774
		kfree(old_memslots);
	}
775

776 777 778 779
	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

780 781 782 783 784 785
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
786

787 788 789 790 791 792 793
	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;
794
	slots->generation++;
795 796 797 798 799 800 801 802 803 804 805 806 807

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);
808

809
	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
810

811 812 813
	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

A
Avi Kivity 已提交
814 815
	return 0;

816
out_free:
A
Avi Kivity 已提交
817 818 819
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
820 821

}
822 823 824 825 826 827 828 829
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

830
	mutex_lock(&kvm->slots_lock);
831
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
832
	mutex_unlock(&kvm->slots_lock);
833 834
	return r;
}
835 836
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

837 838 839 840
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
841
{
842 843
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
844
	return kvm_set_memory_region(kvm, mem, user_alloc);
A
Avi Kivity 已提交
845 846
}

847 848
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
A
Avi Kivity 已提交
849 850 851
{
	struct kvm_memory_slot *memslot;
	int r, i;
852
	unsigned long n;
A
Avi Kivity 已提交
853 854 855 856 857 858
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

859
	memslot = &kvm->memslots->memslots[log->slot];
A
Avi Kivity 已提交
860 861 862 863
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

864
	n = kvm_dirty_bitmap_bytes(memslot);
A
Avi Kivity 已提交
865

866
	for (i = 0; !any && i < n/sizeof(long); ++i)
A
Avi Kivity 已提交
867 868 869 870 871 872
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

873 874
	if (any)
		*is_dirty = 1;
A
Avi Kivity 已提交
875 876 877 878 879 880

	r = 0;
out:
	return r;
}

881 882 883 884 885 886
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

887 888
int is_error_page(struct page *page)
{
889
	return page == bad_page || page == hwpoison_page || page == fault_page;
890 891 892
}
EXPORT_SYMBOL_GPL(is_error_page);

893 894
int is_error_pfn(pfn_t pfn)
{
895
	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
896 897 898
}
EXPORT_SYMBOL_GPL(is_error_pfn);

899 900 901 902 903 904
int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

905 906 907 908 909 910
int is_fault_pfn(pfn_t pfn)
{
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

I
Izik Eidus 已提交
911 912 913 914 915 916 917 918 919 920 921
static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

922 923
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
						gfn_t gfn)
A
Avi Kivity 已提交
924 925 926
{
	int i;

927 928
	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
A
Avi Kivity 已提交
929 930 931 932 933

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
A
Al Viro 已提交
934
	return NULL;
A
Avi Kivity 已提交
935
}
936 937 938 939 940

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
A
Avi Kivity 已提交
941
EXPORT_SYMBOL_GPL(gfn_to_memslot);
A
Avi Kivity 已提交
942

943 944 945
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
946
	struct kvm_memslots *slots = kvm_memslots(kvm);
947 948

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
949
		struct kvm_memory_slot *memslot = &slots->memslots[i];
950

951 952 953
		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

954 955 956 957 958 959 960 961
		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

J
Joerg Roedel 已提交
962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

986 987 988
int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
989
	struct kvm_memslots *slots = kvm_memslots(kvm);
990 991 992 993 994 995 996 997 998 999 1000 1001 1002
	struct kvm_memory_slot *memslot = NULL;

	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

1003
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1004
				     gfn_t *nr_pages)
I
Izik Eidus 已提交
1005
{
1006
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
I
Izik Eidus 已提交
1007
		return bad_hva();
1008 1009 1010 1011

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

1012
	return gfn_to_hva_memslot(slot, gfn);
I
Izik Eidus 已提交
1013
}
1014 1015 1016

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
1017
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1018
}
1019
EXPORT_SYMBOL_GPL(gfn_to_hva);
I
Izik Eidus 已提交
1020

1021 1022 1023 1024 1025 1026
static pfn_t get_fault_pfn(void)
{
	get_page(fault_page);
	return fault_pfn;
}

1027
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
1028
			bool *async, bool write_fault, bool *writable)
A
Avi Kivity 已提交
1029
{
1030
	struct page *page[1];
1031
	int npages = 0;
1032
	pfn_t pfn;
A
Avi Kivity 已提交
1033

1034 1035 1036
	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

1037 1038 1039 1040 1041
	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

1042
	if (atomic || async)
1043
		npages = __get_user_pages_fast(addr, 1, 1, page);
1044 1045

	if (unlikely(npages != 1) && !atomic) {
1046
		might_sleep();
1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064

		if (writable)
			*writable = write_fault;

		npages = get_user_pages_fast(addr, 1, write_fault, page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			npages = 1;
		}
1065
	}
I
Izik Eidus 已提交
1066

1067 1068 1069
	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

1070
		if (atomic)
1071
			return get_fault_pfn();
1072

1073
		down_read(&current->mm->mmap_sem);
1074
		if (is_hwpoison_address(addr)) {
1075
			up_read(&current->mm->mmap_sem);
1076 1077 1078 1079
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

1080
		vma = find_vma_intersection(current->mm, addr, addr+1);
1081

1082 1083 1084 1085 1086 1087 1088 1089
		if (vma == NULL)
			pfn = get_fault_pfn();
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
1090
				*async = true;
1091
			pfn = get_fault_pfn();
1092
		}
1093
		up_read(&current->mm->mmap_sem);
1094 1095
	} else
		pfn = page_to_pfn(page[0]);
1096

1097
	return pfn;
1098 1099
}

1100 1101
pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
1102
	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
1103 1104 1105
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

1106 1107
static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
1108 1109 1110
{
	unsigned long addr;

1111 1112 1113
	if (async)
		*async = false;

1114 1115 1116 1117 1118 1119
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

1120
	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
1121 1122 1123 1124
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
1125
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
1126 1127 1128
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

1129 1130
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
1131
{
1132
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
1133 1134 1135
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

1136 1137
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
1138
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
1139
}
1140 1141
EXPORT_SYMBOL_GPL(gfn_to_pfn);

1142 1143 1144 1145 1146 1147 1148
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

1149 1150 1151 1152
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1153
	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
1154 1155
}

1156 1157 1158 1159 1160 1161
int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
								  int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

1162
	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

1173 1174
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
1175 1176 1177
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
1178
	if (!kvm_is_mmio_pfn(pfn))
1179 1180
		return pfn_to_page(pfn);

1181
	WARN_ON(kvm_is_mmio_pfn(pfn));
1182 1183 1184

	get_page(bad_page);
	return bad_page;
A
Avi Kivity 已提交
1185
}
1186

A
Avi Kivity 已提交
1187 1188
EXPORT_SYMBOL_GPL(gfn_to_page);

1189 1190
void kvm_release_page_clean(struct page *page)
{
1191
	kvm_release_pfn_clean(page_to_pfn(page));
1192 1193 1194
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

1195 1196
void kvm_release_pfn_clean(pfn_t pfn)
{
1197
	if (!kvm_is_mmio_pfn(pfn))
1198
		put_page(pfn_to_page(pfn));
1199 1200 1201
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

1202
void kvm_release_page_dirty(struct page *page)
1203
{
1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
1223
	if (!kvm_is_mmio_pfn(pfn)) {
1224 1225 1226 1227
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
1228
}
1229 1230 1231 1232
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
1233
	if (!kvm_is_mmio_pfn(pfn))
1234
		mark_page_accessed(pfn_to_page(pfn));
1235 1236 1237 1238 1239
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
1240
	if (!kvm_is_mmio_pfn(pfn))
1241
		get_page(pfn_to_page(pfn));
1242 1243
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
1244

1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
1256 1257
	int r;
	unsigned long addr;
1258

1259 1260 1261 1262 1263
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
1300
	pagefault_disable();
1301
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1302
	pagefault_enable();
1303 1304 1305 1306 1307 1308
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

1309 1310 1311
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
1312 1313
	int r;
	unsigned long addr;
1314

1315 1316 1317 1318 1319
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->memslot = __gfn_to_memslot(slots, gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva))
		ghc->hva += offset;
	else
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

1387 1388
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
1389 1390
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

        while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

1413 1414
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
A
Avi Kivity 已提交
1415
{
R
Rusty Russell 已提交
1416 1417
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
A
Avi Kivity 已提交
1418

1419
		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
A
Avi Kivity 已提交
1420 1421 1422
	}
}

1423 1424 1425 1426 1427 1428 1429 1430
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}

E
Eddie Dong 已提交
1431 1432 1433
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
1434
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1435
{
1436 1437 1438 1439 1440
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

1441
		if (kvm_arch_vcpu_runnable(vcpu)) {
1442
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
1443
			break;
1444
		}
1445 1446
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
1447 1448 1449
		if (signal_pending(current))
			break;

E
Eddie Dong 已提交
1450 1451
		schedule();
	}
1452

1453
	finish_wait(&vcpu->wq, &wait);
E
Eddie Dong 已提交
1454 1455
}

A
Avi Kivity 已提交
1456 1457
void kvm_resched(struct kvm_vcpu *vcpu)
{
1458 1459
	if (!need_resched())
		return;
A
Avi Kivity 已提交
1460 1461 1462 1463
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

Z
Zhai, Edwin 已提交
1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

1479
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1480 1481 1482 1483
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

1484
	if (vmf->pgoff == 0)
1485
		page = virt_to_page(vcpu->run);
A
Avi Kivity 已提交
1486
#ifdef CONFIG_X86
1487
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1488
		page = virt_to_page(vcpu->arch.pio_data);
1489 1490 1491 1492
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
A
Avi Kivity 已提交
1493
#endif
1494
	else
1495
		return VM_FAULT_SIGBUS;
1496
	get_page(page);
1497 1498
	vmf->page = page;
	return 0;
1499 1500
}

1501
static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1502
	.fault = kvm_vcpu_fault,
1503 1504 1505 1506 1507 1508 1509 1510
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

A
Avi Kivity 已提交
1511 1512 1513 1514
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

A
Al Viro 已提交
1515
	kvm_put_kvm(vcpu->kvm);
A
Avi Kivity 已提交
1516 1517 1518
	return 0;
}

1519
static struct file_operations kvm_vcpu_fops = {
A
Avi Kivity 已提交
1520 1521 1522
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
1523
	.mmap           = kvm_vcpu_mmap,
1524
	.llseek		= noop_llseek,
A
Avi Kivity 已提交
1525 1526 1527 1528 1529 1530 1531
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
1532
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
A
Avi Kivity 已提交
1533 1534
}

1535 1536 1537
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
1538
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1539 1540
{
	int r;
1541
	struct kvm_vcpu *vcpu, *v;
1542

1543
	vcpu = kvm_arch_vcpu_create(kvm, id);
R
Rusty Russell 已提交
1544 1545
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);
1546

1547 1548
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

1549 1550
	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
1551
		return r;
1552

S
Shaohua Li 已提交
1553
	mutex_lock(&kvm->lock);
1554 1555
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
1556
		goto vcpu_destroy;
R
Rusty Russell 已提交
1557
	}
1558

1559 1560
	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
1561 1562 1563 1564 1565
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1566

R
Rusty Russell 已提交
1567
	/* Now it's all set up, let userspace reach it */
A
Al Viro 已提交
1568
	kvm_get_kvm(kvm);
A
Avi Kivity 已提交
1569
	r = create_vcpu_fd(vcpu);
1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
R
Rusty Russell 已提交
1584
	return r;
1585

1586
vcpu_destroy:
1587
	mutex_unlock(&kvm->lock);
1588
	kvm_arch_vcpu_destroy(vcpu);
1589 1590 1591
	return r;
}

A
Avi Kivity 已提交
1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

A
Avi Kivity 已提交
1603 1604
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
A
Avi Kivity 已提交
1605
{
A
Avi Kivity 已提交
1606
	struct kvm_vcpu *vcpu = filp->private_data;
A
Al Viro 已提交
1607
	void __user *argp = (void __user *)arg;
1608
	int r;
1609 1610
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;
A
Avi Kivity 已提交
1611

1612 1613
	if (vcpu->kvm->mm != current->mm)
		return -EIO;
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	vcpu_load(vcpu);
A
Avi Kivity 已提交
1626
	switch (ioctl) {
1627
	case KVM_RUN:
1628 1629 1630
		r = -EINVAL;
		if (arg)
			goto out;
1631
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1632
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
A
Avi Kivity 已提交
1633 1634
		break;
	case KVM_GET_REGS: {
1635
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
1636

1637 1638 1639
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
A
Avi Kivity 已提交
1640
			goto out;
1641 1642 1643
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
A
Avi Kivity 已提交
1644
		r = -EFAULT;
1645 1646
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
A
Avi Kivity 已提交
1647
		r = 0;
1648 1649
out_free1:
		kfree(kvm_regs);
A
Avi Kivity 已提交
1650 1651 1652
		break;
	}
	case KVM_SET_REGS: {
1653
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
1654

1655 1656 1657
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
A
Avi Kivity 已提交
1658
			goto out;
1659 1660 1661 1662
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
A
Avi Kivity 已提交
1663
		if (r)
1664
			goto out_free2;
A
Avi Kivity 已提交
1665
		r = 0;
1666 1667
out_free2:
		kfree(kvm_regs);
A
Avi Kivity 已提交
1668 1669 1670
		break;
	}
	case KVM_GET_SREGS: {
1671 1672 1673 1674 1675
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
1676 1677 1678
		if (r)
			goto out;
		r = -EFAULT;
1679
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
A
Avi Kivity 已提交
1680 1681 1682 1683 1684
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
1685 1686 1687 1688
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
A
Avi Kivity 已提交
1689
		r = -EFAULT;
1690
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
A
Avi Kivity 已提交
1691
			goto out;
1692
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
1693 1694 1695 1696 1697
		if (r)
			goto out;
		r = 0;
		break;
	}
1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1722 1723 1724 1725
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
A
Al Viro 已提交
1726
		if (copy_from_user(&tr, argp, sizeof tr))
A
Avi Kivity 已提交
1727
			goto out;
1728
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
A
Avi Kivity 已提交
1729 1730 1731
		if (r)
			goto out;
		r = -EFAULT;
A
Al Viro 已提交
1732
		if (copy_to_user(argp, &tr, sizeof tr))
A
Avi Kivity 已提交
1733 1734 1735 1736
			goto out;
		r = 0;
		break;
	}
J
Jan Kiszka 已提交
1737 1738
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;
A
Avi Kivity 已提交
1739 1740

		r = -EFAULT;
A
Al Viro 已提交
1741
		if (copy_from_user(&dbg, argp, sizeof dbg))
A
Avi Kivity 已提交
1742
			goto out;
J
Jan Kiszka 已提交
1743
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
A
Avi Kivity 已提交
1744 1745 1746 1747 1748
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
1769
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
A
Avi Kivity 已提交
1770 1771
		break;
	}
A
Avi Kivity 已提交
1772
	case KVM_GET_FPU: {
1773 1774 1775 1776 1777
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
A
Avi Kivity 已提交
1778 1779 1780
		if (r)
			goto out;
		r = -EFAULT;
1781
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
A
Avi Kivity 已提交
1782 1783 1784 1785 1786
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
1787 1788 1789 1790
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
A
Avi Kivity 已提交
1791
		r = -EFAULT;
1792
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
A
Avi Kivity 已提交
1793
			goto out;
1794
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
A
Avi Kivity 已提交
1795 1796 1797 1798 1799
		if (r)
			goto out;
		r = 0;
		break;
	}
A
Avi Kivity 已提交
1800
	default:
1801
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
1802 1803
	}
out:
1804
	vcpu_put(vcpu);
1805 1806
	kfree(fpu);
	kfree(kvm_sregs);
A
Avi Kivity 已提交
1807 1808 1809 1810 1811 1812 1813 1814
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
1815
	int r;
A
Avi Kivity 已提交
1816

1817 1818
	if (kvm->mm != current->mm)
		return -EIO;
A
Avi Kivity 已提交
1819 1820 1821 1822 1823 1824
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
1825 1826 1827 1828 1829 1830 1831 1832 1833
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
A
Avi Kivity 已提交
1834 1835 1836 1837 1838 1839 1840 1841
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
A
Al Viro 已提交
1842
		if (copy_from_user(&log, argp, sizeof log))
A
Avi Kivity 已提交
1843
			goto out;
1844
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
A
Avi Kivity 已提交
1845 1846 1847 1848
		if (r)
			goto out;
		break;
	}
1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
G
Gregory Haskins 已提交
1873 1874 1875 1876 1877 1878 1879 1880 1881
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
G
Gregory Haskins 已提交
1882 1883 1884 1885 1886 1887 1888 1889 1890
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
1891 1892 1893
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
1894
		mutex_lock(&kvm->lock);
1895 1896 1897 1898
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
1899
		mutex_unlock(&kvm->lock);
1900 1901
		break;
#endif
1902
	default:
1903
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1904 1905
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
1906 1907 1908 1909 1910
	}
out:
	return r;
}

1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

1957
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1958
{
1959 1960 1961 1962
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
1963 1964
	struct kvm *kvm = vma->vm_file->private_data;

1965 1966
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
1967
		return VM_FAULT_SIGBUS;
1968 1969 1970 1971

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
1972
		return VM_FAULT_SIGBUS;
1973 1974

	vmf->page = page[0];
1975
	return 0;
1976 1977
}

1978
static const struct vm_operations_struct kvm_vm_vm_ops = {
1979
	.fault = kvm_vm_fault,
1980 1981 1982 1983 1984 1985 1986 1987
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

1988
static struct file_operations kvm_vm_fops = {
1989 1990
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
1991 1992 1993
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
1994
	.mmap           = kvm_vm_mmap,
1995
	.llseek		= noop_llseek,
1996 1997 1998 1999
};

static int kvm_dev_ioctl_create_vm(void)
{
2000
	int r;
2001 2002 2003
	struct kvm *kvm;

	kvm = kvm_create_vm();
2004 2005
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
2006 2007 2008 2009 2010 2011 2012
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
2013 2014
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (r < 0)
A
Al Viro 已提交
2015
		kvm_put_kvm(kvm);
2016

2017
	return r;
2018 2019
}

2020 2021 2022
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
2023
	case KVM_CAP_USER_MEMORY:
2024
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2025
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2026 2027 2028
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
2029
	case KVM_CAP_INTERNAL_ERROR_DATA:
2030
		return 1;
2031 2032
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
2033
		return KVM_MAX_IRQ_ROUTES;
2034
#endif
2035 2036 2037 2038 2039 2040
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

2041 2042 2043
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
2044
	long r = -EINVAL;
2045 2046 2047

	switch (ioctl) {
	case KVM_GET_API_VERSION:
2048 2049 2050
		r = -EINVAL;
		if (arg)
			goto out;
2051 2052 2053
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
2054 2055 2056
		r = -EINVAL;
		if (arg)
			goto out;
2057 2058
		r = kvm_dev_ioctl_create_vm();
		break;
2059
	case KVM_CHECK_EXTENSION:
2060
		r = kvm_dev_ioctl_check_extension_generic(arg);
2061
		break;
2062 2063 2064 2065
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
2066 2067 2068
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
2069 2070 2071
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
2072
#endif
2073
		break;
2074 2075 2076
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
2077
		r = -EOPNOTSUPP;
2078
		break;
A
Avi Kivity 已提交
2079
	default:
2080
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
2081 2082 2083 2084 2085 2086 2087 2088
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
2089
	.llseek		= noop_llseek,
A
Avi Kivity 已提交
2090 2091 2092
};

static struct miscdevice kvm_dev = {
A
Avi Kivity 已提交
2093
	KVM_MINOR,
A
Avi Kivity 已提交
2094 2095 2096 2097
	"kvm",
	&kvm_chardev_ops,
};

2098
static void hardware_enable_nolock(void *junk)
2099 2100
{
	int cpu = raw_smp_processor_id();
2101
	int r;
2102

2103
	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2104
		return;
2105

2106
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
2107 2108 2109 2110 2111 2112 2113 2114 2115

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
2116 2117
}

2118 2119 2120 2121 2122 2123 2124 2125
static void hardware_enable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	spin_unlock(&kvm_lock);
}

static void hardware_disable_nolock(void *junk)
2126 2127 2128
{
	int cpu = raw_smp_processor_id();

2129
	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2130
		return;
2131
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2132
	kvm_arch_hardware_disable(NULL);
2133 2134
}

2135 2136 2137 2138 2139 2140 2141
static void hardware_disable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	spin_unlock(&kvm_lock);
}

2142 2143 2144 2145 2146 2147
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
2148
		on_each_cpu(hardware_disable_nolock, NULL, 1);
2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
2167
		on_each_cpu(hardware_enable_nolock, NULL, 1);
2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

A
Avi Kivity 已提交
2180 2181 2182 2183 2184
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

2185 2186 2187
	if (!kvm_usage_count)
		return NOTIFY_OK;

2188
	val &= ~CPU_TASKS_FROZEN;
A
Avi Kivity 已提交
2189
	switch (val) {
2190
	case CPU_DYING:
2191 2192 2193 2194
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
2195
	case CPU_STARTING:
2196 2197
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
2198
		hardware_enable(NULL);
A
Avi Kivity 已提交
2199 2200 2201 2202 2203
		break;
	}
	return NOTIFY_OK;
}

2204

2205
asmlinkage void kvm_spurious_fault(void)
2206 2207 2208 2209
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
2210
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
2211

2212
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
M
Mike Day 已提交
2213
		      void *v)
2214
{
2215 2216 2217 2218 2219 2220 2221 2222
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT required VMX off for all cpu when system shutdown.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
2223
	on_each_cpu(hardware_disable_nolock, NULL, 1);
2224 2225 2226 2227 2228 2229 2230 2231
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

M
Marcelo Tosatti 已提交
2232
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2233 2234 2235 2236 2237 2238 2239 2240
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
M
Marcelo Tosatti 已提交
2241
	kfree(bus);
2242 2243
}

2244
/* kvm_io_bus_write - called under kvm->slots_lock */
M
Marcelo Tosatti 已提交
2245
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2246
		     int len, const void *val)
2247 2248
{
	int i;
2249 2250 2251
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2252 2253 2254 2255 2256
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
2257

2258
/* kvm_io_bus_read - called under kvm->slots_lock */
M
Marcelo Tosatti 已提交
2259 2260
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
2261 2262
{
	int i;
2263
	struct kvm_io_bus *bus;
M
Marcelo Tosatti 已提交
2264

2265
	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2266 2267 2268 2269
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
2270 2271
}

2272
/* Caller must hold slots_lock. */
M
Marcelo Tosatti 已提交
2273 2274
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
2275
{
M
Marcelo Tosatti 已提交
2276
	struct kvm_io_bus *new_bus, *bus;
2277

M
Marcelo Tosatti 已提交
2278
	bus = kvm->buses[bus_idx];
2279 2280
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;
2281

M
Marcelo Tosatti 已提交
2282 2283 2284 2285 2286 2287 2288 2289
	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
2290 2291 2292 2293

	return 0;
}

2294
/* Caller must hold slots_lock. */
M
Marcelo Tosatti 已提交
2295 2296
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
2297
{
M
Marcelo Tosatti 已提交
2298 2299
	int i, r;
	struct kvm_io_bus *new_bus, *bus;
2300

M
Marcelo Tosatti 已提交
2301 2302 2303
	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
2304

M
Marcelo Tosatti 已提交
2305 2306 2307 2308 2309 2310 2311 2312
	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
2313 2314
			break;
		}
M
Marcelo Tosatti 已提交
2315 2316 2317 2318 2319 2320 2321 2322 2323 2324

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
2325 2326
}

A
Avi Kivity 已提交
2327 2328 2329 2330
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

2331
static int vm_stat_get(void *_offset, u64 *val)
2332 2333 2334 2335
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

2336
	*val = 0;
2337 2338
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
2339
		*val += *(u32 *)((void *)kvm + offset);
2340
	spin_unlock(&kvm_lock);
2341
	return 0;
2342 2343 2344 2345
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

2346
static int vcpu_stat_get(void *_offset, u64 *val)
A
Avi Kivity 已提交
2347 2348 2349 2350 2351 2352
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

2353
	*val = 0;
A
Avi Kivity 已提交
2354 2355
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
2356 2357 2358
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

A
Avi Kivity 已提交
2359
	spin_unlock(&kvm_lock);
2360
	return 0;
A
Avi Kivity 已提交
2361 2362
}

2363 2364
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

2365
static const struct file_operations *stat_fops[] = {
2366 2367 2368
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
A
Avi Kivity 已提交
2369

2370
static void kvm_init_debug(void)
A
Avi Kivity 已提交
2371 2372 2373
{
	struct kvm_stats_debugfs_item *p;

2374
	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
A
Avi Kivity 已提交
2375
	for (p = debugfs_entries; p->name; ++p)
2376
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
A
Avi Kivity 已提交
2377
						(void *)(long)p->offset,
2378
						stat_fops[p->kind]);
A
Avi Kivity 已提交
2379 2380 2381 2382 2383 2384 2385 2386
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
2387
	debugfs_remove(kvm_debugfs_dir);
A
Avi Kivity 已提交
2388 2389
}

2390 2391
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
2392
	if (kvm_usage_count)
2393
		hardware_disable_nolock(NULL);
2394 2395 2396 2397 2398
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
2399 2400
	if (kvm_usage_count) {
		WARN_ON(spin_is_locked(&kvm_lock));
2401
		hardware_enable_nolock(NULL);
2402
	}
2403 2404 2405 2406
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
2407
	.name = "kvm",
2408 2409 2410 2411 2412 2413 2414 2415 2416
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

2417
struct page *bad_page;
2418
pfn_t bad_pfn;
A
Avi Kivity 已提交
2419

2420 2421 2422 2423 2424 2425 2426 2427 2428 2429
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

2430
	kvm_arch_vcpu_load(vcpu, cpu);
2431 2432 2433 2434 2435 2436 2437
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

2438
	kvm_arch_vcpu_put(vcpu);
2439 2440
}

2441
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2442
		  struct module *module)
A
Avi Kivity 已提交
2443 2444
{
	int r;
Y
Yang, Sheng 已提交
2445
	int cpu;
A
Avi Kivity 已提交
2446

2447 2448
	r = kvm_arch_init(opaque);
	if (r)
2449
		goto out_fail;
2450 2451 2452 2453 2454 2455 2456 2457

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

2458 2459
	bad_pfn = page_to_pfn(bad_page);

2460 2461 2462 2463 2464 2465 2466 2467 2468
	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

2469 2470 2471 2472 2473 2474 2475 2476 2477
	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

2478
	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2479 2480 2481 2482
		r = -ENOMEM;
		goto out_free_0;
	}

2483
	r = kvm_arch_hardware_setup();
A
Avi Kivity 已提交
2484
	if (r < 0)
2485
		goto out_free_0a;
A
Avi Kivity 已提交
2486

Y
Yang, Sheng 已提交
2487 2488
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
2489
				kvm_arch_check_processor_compat,
2490
				&r, 1);
Y
Yang, Sheng 已提交
2491
		if (r < 0)
2492
			goto out_free_1;
Y
Yang, Sheng 已提交
2493 2494
	}

A
Avi Kivity 已提交
2495 2496
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
2497
		goto out_free_2;
A
Avi Kivity 已提交
2498 2499
	register_reboot_notifier(&kvm_reboot_notifier);

2500 2501
	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
2502
		goto out_free_3;
2503 2504 2505

	r = sysdev_register(&kvm_sysdev);
	if (r)
2506
		goto out_free_4;
2507

2508
	/* A kmem cache lets us meet the alignment requirements of fx_save. */
2509 2510 2511
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
J
Joe Perches 已提交
2512
					   0, NULL);
2513 2514
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
2515
		goto out_free_5;
2516 2517
	}

2518 2519 2520 2521
	r = kvm_async_pf_init();
	if (r)
		goto out_free;

A
Avi Kivity 已提交
2522
	kvm_chardev_ops.owner = module;
2523 2524
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
A
Avi Kivity 已提交
2525 2526 2527

	r = misc_register(&kvm_dev);
	if (r) {
M
Mike Day 已提交
2528
		printk(KERN_ERR "kvm: misc device register failed\n");
2529
		goto out_unreg;
A
Avi Kivity 已提交
2530 2531
	}

2532 2533 2534
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

2535 2536
	kvm_init_debug();

2537
	return 0;
A
Avi Kivity 已提交
2538

2539 2540
out_unreg:
	kvm_async_pf_deinit();
A
Avi Kivity 已提交
2541
out_free:
2542
	kmem_cache_destroy(kvm_vcpu_cache);
2543
out_free_5:
2544
	sysdev_unregister(&kvm_sysdev);
2545
out_free_4:
2546
	sysdev_class_unregister(&kvm_sysdev_class);
2547
out_free_3:
A
Avi Kivity 已提交
2548
	unregister_reboot_notifier(&kvm_reboot_notifier);
A
Avi Kivity 已提交
2549
	unregister_cpu_notifier(&kvm_cpu_notifier);
2550 2551
out_free_2:
out_free_1:
2552
	kvm_arch_hardware_unsetup();
2553 2554
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
2555
out_free_0:
2556 2557
	if (fault_page)
		__free_page(fault_page);
2558 2559
	if (hwpoison_page)
		__free_page(hwpoison_page);
2560
	__free_page(bad_page);
2561
out:
2562
	kvm_arch_exit();
2563
out_fail:
A
Avi Kivity 已提交
2564 2565
	return r;
}
2566
EXPORT_SYMBOL_GPL(kvm_init);
A
Avi Kivity 已提交
2567

2568
void kvm_exit(void)
A
Avi Kivity 已提交
2569
{
2570
	kvm_exit_debug();
A
Avi Kivity 已提交
2571
	misc_deregister(&kvm_dev);
2572
	kmem_cache_destroy(kvm_vcpu_cache);
2573
	kvm_async_pf_deinit();
2574 2575
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
A
Avi Kivity 已提交
2576
	unregister_reboot_notifier(&kvm_reboot_notifier);
2577
	unregister_cpu_notifier(&kvm_cpu_notifier);
2578
	on_each_cpu(hardware_disable_nolock, NULL, 1);
2579
	kvm_arch_hardware_unsetup();
2580
	kvm_arch_exit();
2581
	free_cpumask_var(cpus_hardware_enabled);
2582
	__free_page(hwpoison_page);
2583
	__free_page(bad_page);
A
Avi Kivity 已提交
2584
}
2585
EXPORT_SYMBOL_GPL(kvm_exit);