/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, int, S_IRUGO);

/* Default resets per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, int, S_IRUGO);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
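
/*
 * Illustrative sketch (not part of the original file): a path that
 * needed all three locks would have to take them in the order
 * documented above, e.g.:
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 *
 * Taking them in any other order risks an ABBA deadlock against a
 * thread that follows the documented order.  All three are mutexes,
 * initialized in kvm_create_vm() below.
 */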

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_reserved_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
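
/*
 * Illustrative usage sketch (not part of the original file): callers
 * bracket work on vcpu state with vcpu_load()/vcpu_put(), e.g.:
 *
 *	r = vcpu_load(vcpu);
 *	if (r)
 *		return r;
 *	...operate on the vcpu, now loaded on this cpu...
 *	vcpu_put(vcpu);
 *
 * This is the pattern kvm_vcpu_ioctl() uses; the -EINTR return covers
 * the case where the task is killed while waiting for vcpu->mutex.
 */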

static void ack_flush(void *_completed)
{
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		      kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	vcpu->halt_poll_ns = 0;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		return NULL;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code of handling generation number wrap-around.
	 */
	slots->generation = -150;
	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}
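
/*
 * Sketch of the intended pairing (illustrative, not original code):
 * a kvm_kvzalloc() buffer may come from either allocator, so it must
 * be freed with kvfree(), which handles both cases:
 *
 *	struct kvm_memslots *slots = kvm_kvzalloc(sizeof(*slots));
 *
 *	if (!slots)
 *		return -ENOMEM;
 *	...
 *	kvfree(slots);
 */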

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}
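
/*
 * Worked example (illustrative; the kvm_dirty_bitmap_bytes() formula
 * is an assumption from kvm_host.h of this era): for a slot of npages
 * pages the bitmap proper is ALIGN(npages, BITS_PER_LONG) / 8 bytes,
 * so the allocation above is twice that.  kvm_get_dirty_log_protect()
 * uses the second half as the snapshot buffer:
 *
 *	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
 */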

/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sorting algorithm takes advantage of having an initially
 * sorted array and of knowing the changed memslot's position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}
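
/*
 * Worked example (illustrative, not original text): the array is kept
 * sorted by base_gfn in descending order.  With used slots at
 * base_gfn {16, 8, 0}, creating a slot with base_gfn == 12 starts in
 * the unused tail; the second loop above shifts the 0 and 8 entries
 * toward the tail and stores the new slot at index 1, leaving
 * {16, 12, 8, 0}, which gfn lookups can then binary-search.
 */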

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

	kvm_arch_memslots_updated(kvm, slots);

	return old_memslots;
}
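
/*
 * Worked example (illustrative): memslot generations are even while a
 * set is live.  An update takes, say, generation 4 to 5 (low bit set,
 * which disables SPTE caching) before synchronize_srcu_expedited()
 * runs, then 5 to 6 once the new set is visible everywhere, so a vm
 * exit racing with the update can never cache under a generation that
 * will be valid again later.
 */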

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == id))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;
A
Avi Kivity 已提交
901 902

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty write protect them for next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages;
A
Avi Kivity 已提交
1296

X
Xiao Guangrong 已提交
1297 1298
	if (!(async || atomic))
		return false;
1299

1300 1301 1302 1303 1304 1305 1306
	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function need to wait IO complete if the
 *         host page is not in the memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_reserved_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);

struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	pfn_t pfn;

	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			           void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
			          const void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(memslot, gfn);
	return 0;
}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
		         unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);
1850 1851 1852 1853

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

1854
	r = __copy_to_user((void __user *)ghc->hva, data, len);
1855 1856
	if (r)
		return -EFAULT;
1857
	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1858 1859 1860 1861 1862

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

1863 1864 1865 1866 1867 1868
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

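/*
 * The dirty bitmap holds one bit per page of a memslot, indexed by the
 * gfn's offset from the slot's base_gfn; set_bit_le() keeps the layout
 * little-endian so userspace can harvest it via KVM_GET_DIRTY_LOG.
 */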
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

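/*
 * Adaptive halt-polling policy: with the default halt_poll_ns_grow of 2,
 * a vcpu's poll window grows 0 -> 10000ns (the 10us base) -> 20000ns on
 * short halts, and with the default halt_poll_ns_shrink of 0 it is reset
 * straight back to 0 after a long block.
 */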
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	int old, val;

	old = val = vcpu->halt_poll_ns;
	/* 10us base */
	if (val == 0 && halt_poll_ns_grow)
		val = 10000;
	else
		val *= halt_poll_ns_grow;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	int old, val;

	old = val = vcpu->halt_poll_ns;
	if (halt_poll_ns_shrink == 0)
		val = 0;
	else
		val /= halt_poll_ns_shrink;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		return -EINTR;
	}
	if (kvm_cpu_has_pending_timer(vcpu))
		return -EINTR;
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

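/*
 * Halt-polling: kvm_vcpu_block() first busy-polls for up to
 * vcpu->halt_poll_ns, using kvm_vcpu_check_block() above as the wakeup
 * test, and only falls back to a real wait-queue sleep when the poll
 * window expires; this trades a little CPU time for much cheaper wakeups
 * on short halts.
 */
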
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	ktime_t start, cur;
	DEFINE_WAIT(wait);
	bool waited = false;
	u64 block_ns;

	start = cur = ktime_get();
	if (vcpu->halt_poll_ns) {
		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

		++vcpu->stat.halt_attempted_poll;
		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
			 */
			if (kvm_vcpu_check_block(vcpu) < 0) {
				++vcpu->stat.halt_successful_poll;
				goto out;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));
	}

	kvm_arch_vcpu_blocking(vcpu);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_vcpu_check_block(vcpu) < 0)
			break;

		waited = true;
		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
	cur = ktime_get();

	kvm_arch_vcpu_unblocking(vcpu);
out:
	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);

	if (halt_poll_ns) {
		if (block_ns <= vcpu->halt_poll_ns)
			;
		/* we had a long block, shrink polling */
		else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
			shrink_halt_poll_ns(vcpu);
		/* we had a short halt and our poll time is too small */
		else if (vcpu->halt_poll_ns < halt_poll_ns &&
			block_ns < halt_poll_ns)
			grow_halt_poll_ns(vcpu);
	} else
		vcpu->halt_poll_ns = 0;

	trace_kvm_vcpu_wakeup(block_ns, waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	int ret = 0;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following
 * heuristics:
 *
 *  (a) A VCPU which has not done a PLE exit or had cpu relax intercepted
 *  recently (a likely preempted lock holder), indicated by @in_spin_loop.
 *  Set at the beginning and cleared at the end of the interception/PLE
 *  handler.
 *
 *  (b) A VCPU which has done a PLE exit/cpu relax interception but did
 *  not get a chance last time (it has mostly become eligible now since
 *  we have probably yielded to the lock holder in the last iteration).
 *  This is done by toggling @dy_eligible each time a VCPU is checked
 *  for eligibility.
 *
 *  Yielding to a recently PLE-exited/cpu relax intercepted VCPU before
 *  yielding to a preempted lock holder could result in wrong VCPU
 *  selection and CPU burning.  Giving priority to a potential lock
 *  holder increases lock progress.
 *
 *  Since the algorithm is based on heuristics, accessing another VCPU's
 *  data without locking does not harm.  It may result in trying to
 *  yield to the same VCPU, failing, and continuing with the next VCPU,
 *  and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
		    vcpu->spin_loop.dy_eligible;

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

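/*
 * Example of the two-pass scan in kvm_vcpu_on_spin() below with 4 vCPUs
 * and last_boosted_vcpu == 2: pass 0 considers only vCPU 3 (indices <= 2
 * are skipped), pass 1 considers vCPUs 0..2, so the search starts just
 * after the last boosted vCPU and wraps around once.
 */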
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (!ACCESS_ONCE(vcpu->preempted))
				continue;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
				kvm->last_boosted_vcpu = i;
				break;
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

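/*
 * Page-offset layout backing the vcpu fd's mmap(): page 0 is the kvm_run
 * structure, KVM_PIO_PAGE_OFFSET (x86 only) the PIO data page, and
 * KVM_COALESCED_MMIO_PAGE_OFFSET the coalesced MMIO ring, matching the
 * size reported by KVM_GET_VCPU_MMAP_SIZE in kvm_dev_ioctl().
 */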
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return kvm_arch_vcpu_fault(vcpu, vmf);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
	if (kvm_get_vcpu_by_id(kvm, id)) {
		r = -EEXIST;
		goto unlock_vcpu_destroy;
	}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;

	/*
	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
	 * before kvm->online_vcpus' incremented value.
	 */
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_postcreate(vcpu);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	r = vcpu_load(vcpu);
	if (r)
		return r;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
			/* The thread running this VCPU changed. */
			struct pid *oldpid = vcpu->pid;
			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);

			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			kvm_sregs = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof(tr)))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof(tr)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(sigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof(sigset)))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
			fpu = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(csigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
					   sizeof(csigset)))
				goto out;
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{
	struct kvm_device_attr attr;

	if (!accessor)
		return -EPERM;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}

static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{
	struct kvm_device *dev = filp->private_data;

	switch (ioctl) {
	case KVM_SET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
	case KVM_GET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
	case KVM_HAS_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
	default:
		if (dev->ops->ioctl)
			return dev->ops->ioctl(dev, ioctl, arg);

		return -ENOTTY;
	}
}

static int kvm_device_release(struct inode *inode, struct file *filp)
{
	struct kvm_device *dev = filp->private_data;
	struct kvm *kvm = dev->kvm;

	kvm_put_kvm(kvm);
	return 0;
}

static const struct file_operations kvm_device_fops = {
	.unlocked_ioctl = kvm_device_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl = kvm_device_ioctl,
#endif
	.release = kvm_device_release,
};

struct kvm_device *kvm_device_from_filp(struct file *filp)
{
	if (filp->f_op != &kvm_device_fops)
		return NULL;

	return filp->private_data;
}

static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_MPIC
	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
#endif

#ifdef CONFIG_KVM_XICS
	[KVM_DEV_TYPE_XICS]		= &kvm_xics_ops,
#endif
};

int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
{
	if (type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENOSPC;

	if (kvm_device_ops_table[type] != NULL)
		return -EEXIST;

	kvm_device_ops_table[type] = ops;
	return 0;
}


void kvm_unregister_device_ops(u32 type)
{
	if (kvm_device_ops_table[type] != NULL)
		kvm_device_ops_table[type] = NULL;
}

static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{
	struct kvm_device_ops *ops = NULL;
	struct kvm_device *dev;
	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	int ret;

	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENODEV;

	ops = kvm_device_ops_table[cd->type];
	if (ops == NULL)
		return -ENODEV;

	if (test)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ops = ops;
	dev->kvm = kvm;

	ret = ops->create(dev, cd->type);
	if (ret < 0) {
		kfree(dev);
		return ret;
	}

	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
	if (ret < 0) {
		ops->destroy(dev);
		return ret;
	}

	list_add(&dev->vm_node, &kvm->devices);
	kvm_get_kvm(kvm);
	cd->fd = ret;
	return 0;
}

static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
	case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD:
	case KVM_CAP_IRQFD_RESAMPLE:
#endif
	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
	case KVM_CAP_CHECK_EXTENSION_VM:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
	case KVM_CAP_MULTI_ADDRESS_SPACE:
		return KVM_ADDRESS_SPACE_NUM;
#endif
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof(kvm_userspace_mem)))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof(log)))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_irqfd(kvm, &data);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_SIGNAL_MSI: {
		struct kvm_msi msi;

		r = -EFAULT;
		if (copy_from_user(&msi, argp, sizeof(msi)))
			goto out;
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
			goto out;

		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
					ioctl == KVM_IRQ_LINE_STATUS);
		if (r)
			goto out;

		r = -EFAULT;
		if (ioctl == KVM_IRQ_LINE_STATUS) {
			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
				goto out;
		}

		r = 0;
		break;
	}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
	case KVM_CREATE_DEVICE: {
		struct kvm_create_device cd;

		r = -EFAULT;
		if (copy_from_user(&cd, argp, sizeof(cd)))
			goto out;

		r = kvm_ioctl_create_device(kvm, &cd);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(argp, &cd, sizeof(cd)))
			goto out;

		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
		break;
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.llseek		= noop_llseek,
};

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm(type);
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = kvm_dev_ioctl_create_vm(arg);
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable();

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_enable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable();
}

static void hardware_disable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

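/*
 * kvm_usage_count tracks live VMs: the first VM created enables the
 * virtualization extensions on every online CPU via hardware_enable_all(),
 * the last VM destroyed disables them again, and the CPU hotplug and
 * suspend/resume paths key off the same count.
 */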
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_count_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_count_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_count_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_count_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		hardware_disable();
		break;
	case CPU_STARTING:
		hardware_enable();
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	pr_info("kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * accept any overlapping write.  Any order is acceptable for
	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
	 * we process all of them.
	 */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
	return kvm_io_bus_cmp(p1, p2);
}

static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
			  gpa_t addr, int len)
{
	bus->range[bus->dev_count++] = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
		kvm_io_bus_sort_cmp, NULL);

	return 0;
}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
			     gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}

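/*
 * Because overlapping ranges compare equal, bsearch() may land on any
 * matching entry; the walk-back loop above rewinds to the first one so
 * that callers iterating forward visit every overlapping device.
 */
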
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
	return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
				       range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}


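/*
 * Bus updates are copy-and-replace under SRCU: a new kvm_io_bus is
 * allocated with the device added or removed, published with
 * rcu_assign_pointer(), and the old bus freed only after
 * synchronize_srcu_expedited() guarantees no reader still sees it.
 */
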
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
	       sizeof(struct kvm_io_range)));
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	r = -ENOENT;
	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
			r = 0;
			break;
		}

	if (r)
		return r;

	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static int kvm_init_debug(void)
{
	int r = -EEXIST;
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	if (kvm_debugfs_dir == NULL)
		goto out;

	for (p = debugfs_entries; p->name; ++p) {
		if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
					 (void *)(long)p->offset,
					 stat_fops[p->kind]))
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (vcpu->preempted)
		vcpu->preempted = false;

	kvm_arch_sched_in(vcpu, cpu);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
	kvm_arch_vcpu_put(vcpu);
}
3502
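/*
 * Illustrative arch-side entry point (assumed, not part of this file):
 * an architecture module's init function chains into kvm_init(), e.g.
 * x86's VMX does roughly
 *
 *	return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *			__alignof__(struct vcpu_vmx), THIS_MODULE);
 *
 * which drives the hardware setup, notifier registration and chardev
 * creation sequence below.
 */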
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3503
		  struct module *module)
A
Avi Kivity 已提交
3504 3505
{
	int r;
Y
Yang, Sheng 已提交
3506
	int cpu;
A
Avi Kivity 已提交
3507

3508 3509
	r = kvm_arch_init(opaque);
	if (r)
3510
		goto out_fail;
3511

3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522
	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like intel and amd on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already setup for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

3523
	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3524 3525 3526 3527
		r = -ENOMEM;
		goto out_free_0;
	}

3528
	r = kvm_arch_hardware_setup();
A
Avi Kivity 已提交
3529
	if (r < 0)
3530
		goto out_free_0a;
A
Avi Kivity 已提交
3531

Y
Yang, Sheng 已提交
3532 3533
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
3534
				kvm_arch_check_processor_compat,
3535
				&r, 1);
Y
Yang, Sheng 已提交
3536
		if (r < 0)
3537
			goto out_free_1;
Y
Yang, Sheng 已提交
3538 3539
	}

A
Avi Kivity 已提交
3540 3541
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
3542
		goto out_free_2;
A
Avi Kivity 已提交
3543 3544
	register_reboot_notifier(&kvm_reboot_notifier);

3545
	/* A kmem cache lets us meet the alignment requirements of fx_save. */
3546 3547 3548
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
J
Joe Perches 已提交
3549
					   0, NULL);
3550 3551
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
3552
		goto out_free_3;
3553 3554
	}

3555 3556 3557 3558
	r = kvm_async_pf_init();
	if (r)
		goto out_free;

A
Avi Kivity 已提交
3559
	kvm_chardev_ops.owner = module;
3560 3561
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
A
Avi Kivity 已提交
3562 3563 3564

	r = misc_register(&kvm_dev);
	if (r) {
X
Xiubo Li 已提交
3565
		pr_err("kvm: misc device register failed\n");
3566
		goto out_unreg;
A
Avi Kivity 已提交
3567 3568
	}

3569 3570
	register_syscore_ops(&kvm_syscore_ops);

3571 3572 3573
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

3574 3575
	r = kvm_init_debug();
	if (r) {
X
Xiubo Li 已提交
3576
		pr_err("kvm: create debugfs files failed\n");
3577 3578
		goto out_undebugfs;
	}
3579

P
Paolo Bonzini 已提交
3580 3581 3582
	r = kvm_vfio_ops_init();
	WARN_ON(r);

3583
	return 0;
A
Avi Kivity 已提交
3584

3585 3586
out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
3587
	misc_deregister(&kvm_dev);
3588 3589
out_unreg:
	kvm_async_pf_deinit();
A
Avi Kivity 已提交
3590
out_free:
3591
	kmem_cache_destroy(kvm_vcpu_cache);
3592
out_free_3:
A
Avi Kivity 已提交
3593
	unregister_reboot_notifier(&kvm_reboot_notifier);
A
Avi Kivity 已提交
3594
	unregister_cpu_notifier(&kvm_cpu_notifier);
3595 3596
out_free_2:
out_free_1:
3597
	kvm_arch_hardware_unsetup();
3598 3599
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
3600
out_free_0:
3601 3602
	kvm_irqfd_exit();
out_irqfd:
3603 3604
	kvm_arch_exit();
out_fail:
A
Avi Kivity 已提交
3605 3606
	return r;
}
3607
EXPORT_SYMBOL_GPL(kvm_init);
A
Avi Kivity 已提交
3608

void kvm_exit(void)
{
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);