kvm_main.c 84.2 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
N
Nicolas Kaiser 已提交
8
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
9 10 11 12 13 14 15 16 17 18
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

19
#include <kvm/iodev.h>
A
Avi Kivity 已提交
20

21
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
22 23 24 25 26 27 28 29 30 31 32
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
33
#include <linux/syscore_ops.h>
A
Avi Kivity 已提交
34
#include <linux/cpu.h>
A
Alexey Dobriyan 已提交
35
#include <linux/sched.h>
36 37
#include <linux/cpumask.h>
#include <linux/smp.h>
38
#include <linux/anon_inodes.h>
39
#include <linux/profile.h>
40
#include <linux/kvm_para.h>
41
#include <linux/pagemap.h>
42
#include <linux/mman.h>
43
#include <linux/swap.h>
44
#include <linux/bitops.h>
45
#include <linux/spinlock.h>
46
#include <linux/compat.h>
47
#include <linux/srcu.h>
J
Joerg Roedel 已提交
48
#include <linux/hugetlb.h>
49
#include <linux/slab.h>
50 51
#include <linux/sort.h>
#include <linux/bsearch.h>
A
Avi Kivity 已提交
52

A
Avi Kivity 已提交
53 54
#include <asm/processor.h>
#include <asm/io.h>
55
#include <asm/ioctl.h>
A
Avi Kivity 已提交
56
#include <asm/uaccess.h>
57
#include <asm/pgtable.h>
A
Avi Kivity 已提交
58

59
#include "coalesced_mmio.h"
60
#include "async_pf.h"
P
Paolo Bonzini 已提交
61
#include "vfio.h"
62

63 64 65
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

A
Avi Kivity 已提交
66 67 68
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

69 70
/* Architectures should define their poll value according to the halt latency */
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
71 72
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

W
Wanpeng Li 已提交
73 74 75 76 77 78 79 80
/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, int, S_IRUGO);

/* Default resets per-vcpu halt_poll_ns . */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, int, S_IRUGO);

81 82 83
/*
 * Ordering of locks:
 *
84
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
85 86
 */

87
DEFINE_SPINLOCK(kvm_lock);
88
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
89
LIST_HEAD(vm_list);
90

91
static cpumask_var_t cpus_hardware_enabled;
92
static int kvm_usage_count;
93
static atomic_t hardware_enable_failed;
94

95 96
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
A
Avi Kivity 已提交
97

98 99
static __read_mostly struct preempt_ops kvm_preempt_ops;

100
struct dentry *kvm_debugfs_dir;
101
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
A
Avi Kivity 已提交
102

A
Avi Kivity 已提交
103 104
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
105
#ifdef CONFIG_KVM_COMPAT
106 107 108
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
109 110
static int hardware_enable_all(void);
static void hardware_disable_all(void);
A
Avi Kivity 已提交
111

M
Marcelo Tosatti 已提交
112
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
113 114

static void kvm_release_pfn_dirty(pfn_t pfn);
115
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
M
Marcelo Tosatti 已提交
116

117
__visible bool kvm_rebooting;
118
EXPORT_SYMBOL_GPL(kvm_rebooting);
119

120 121
static bool largepages_enabled = true;

122
bool kvm_is_reserved_pfn(pfn_t pfn)
B
Ben-Ami Yassour 已提交
123
{
124
	if (pfn_valid(pfn))
125
		return PageReserved(pfn_to_page(pfn));
B
Ben-Ami Yassour 已提交
126 127 128 129

	return true;
}

A
Avi Kivity 已提交
130 131 132
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
133
int vcpu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
134
{
135 136
	int cpu;

137 138
	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
139 140
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
141
	kvm_arch_vcpu_load(vcpu, cpu);
142
	put_cpu();
143
	return 0;
A
Avi Kivity 已提交
144 145
}

146
void vcpu_put(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
147
{
148
	preempt_disable();
149
	kvm_arch_vcpu_put(vcpu);
150 151
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
A
Avi Kivity 已提交
152 153 154
	mutex_unlock(&vcpu->mutex);
}

155 156 157 158
static void ack_flush(void *_completed)
{
}

159
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
160
{
161
	int i, cpu, me;
162 163
	cpumask_var_t cpus;
	bool called = true;
164 165
	struct kvm_vcpu *vcpu;

166
	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
167

168
	me = get_cpu();
169
	kvm_for_each_vcpu(i, vcpu, kvm) {
170
		kvm_make_request(req, vcpu);
171
		cpu = vcpu->cpu;
172 173 174 175 176 177

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		      kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
178
			cpumask_set_cpu(cpu, cpus);
179
	}
180 181 182 183 184 185
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
186
	put_cpu();
187
	free_cpumask_var(cpus);
188
	return called;
189 190
}

191
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
192
void kvm_flush_remote_tlbs(struct kvm *kvm)
193
{
194 195 196
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
197
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
198
		++kvm->stat.remote_tlb_flush;
199
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
200
}
201
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
202
#endif
203

204 205
void kvm_reload_remote_mmus(struct kvm *kvm)
{
206
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
207
}
208

209 210
void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
211
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
212 213
}

214
void kvm_make_scan_ioapic_request(struct kvm *kvm)
215
{
216
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
217 218
}

R
Rusty Russell 已提交
219 220 221 222 223 224 225 226 227
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
228
	vcpu->pid = NULL;
W
Wanpeng Li 已提交
229
	vcpu->halt_poll_ns = 0;
E
Eddie Dong 已提交
230
	init_waitqueue_head(&vcpu->wq);
231
	kvm_async_pf_vcpu_init(vcpu);
R
Rusty Russell 已提交
232

233 234 235
	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

R
Rusty Russell 已提交
236 237 238 239 240 241 242
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

243 244
	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
245
	vcpu->preempted = false;
246

247
	r = kvm_arch_vcpu_init(vcpu);
R
Rusty Russell 已提交
248
	if (r < 0)
249
		goto fail_free_run;
R
Rusty Russell 已提交
250 251 252 253 254
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
255
	return r;
R
Rusty Russell 已提交
256 257 258 259 260
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
261
	put_pid(vcpu->pid);
262
	kvm_arch_vcpu_uninit(vcpu);
R
Rusty Russell 已提交
263 264 265 266
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

267 268 269 270 271 272 273 274 275 276 277
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
278
	int need_tlb_flush, idx;
279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only need to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
298
	idx = srcu_read_lock(&kvm->srcu);
299
	spin_lock(&kvm->mmu_lock);
300

301
	kvm->mmu_notifier_seq++;
302
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
303 304 305 306
	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

307
	spin_unlock(&kvm->mmu_lock);
308 309 310

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

311
	srcu_read_unlock(&kvm->srcu, idx);
312 313
}

314 315 316 317 318 319
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
320
	int idx;
321

322
	idx = srcu_read_lock(&kvm->srcu);
323 324 325 326
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
327
	srcu_read_unlock(&kvm->srcu, idx);
328 329
}

330 331 332 333 334 335
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
336
	int need_tlb_flush = 0, idx;
337

338
	idx = srcu_read_lock(&kvm->srcu);
339 340 341 342 343 344 345
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
346
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
347
	need_tlb_flush |= kvm->tlbs_dirty;
348 349 350
	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
351 352 353

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
370
	smp_wmb();
371 372
	/*
	 * The above sequence increase must be visible before the
373 374
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
375 376 377 378 379 380 381 382 383
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
A
Andres Lagar-Cavilla 已提交
384 385
					      unsigned long start,
					      unsigned long end)
386 387
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
388
	int young, idx;
389

390
	idx = srcu_read_lock(&kvm->srcu);
391 392
	spin_lock(&kvm->mmu_lock);

A
Andres Lagar-Cavilla 已提交
393
	young = kvm_age_hva(kvm, start, end);
394 395 396
	if (young)
		kvm_flush_remote_tlbs(kvm);

397 398 399
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

400 401 402
	return young;
}

403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

A
Andrea Arcangeli 已提交
433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

449 450 451 452
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
453 454 455
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
456
	kvm_arch_flush_shadow_all(kvm);
457
	srcu_read_unlock(&kvm->srcu, idx);
458 459
}

460 461 462 463 464
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
465
	.clear_young		= kvm_mmu_notifier_clear_young,
A
Andrea Arcangeli 已提交
466
	.test_young		= kvm_mmu_notifier_test_young,
467
	.change_pte		= kvm_mmu_notifier_change_pte,
468
	.release		= kvm_mmu_notifier_release,
469
};
470 471 472 473 474 475 476 477 478 479 480 481 482 483

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

484 485
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

486
static struct kvm_memslots *kvm_alloc_memslots(void)
487 488
{
	int i;
489
	struct kvm_memslots *slots;
490

491 492 493 494 495 496 497 498 499
	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		return NULL;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code of handling generation number wrap-around.
	 */
	slots->generation = -150;
500
	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
501
		slots->id_to_index[i] = slots->memslots[i].id = i;
502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
540 541
}

542
static struct kvm *kvm_create_vm(unsigned long type)
A
Avi Kivity 已提交
543
{
544 545
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();
A
Avi Kivity 已提交
546

547 548 549
	if (!kvm)
		return ERR_PTR(-ENOMEM);

550
	r = kvm_arch_init_vm(kvm, type);
551
	if (r)
552
		goto out_err_no_disable;
553 554 555

	r = hardware_enable_all();
	if (r)
556
		goto out_err_no_disable;
557

558
#ifdef CONFIG_HAVE_KVM_IRQFD
559
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
560
#endif
A
Avi Kivity 已提交
561

562 563
	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

564
	r = -ENOMEM;
565 566 567 568 569
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}
570

571
	if (init_srcu_struct(&kvm->srcu))
572 573 574
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
M
Marcelo Tosatti 已提交
575 576 577
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
578
		if (!kvm->buses[i])
M
Marcelo Tosatti 已提交
579 580
			goto out_err;
	}
581

582
	spin_lock_init(&kvm->mmu_lock);
583 584
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
G
Gregory Haskins 已提交
585
	kvm_eventfd_init(kvm);
S
Shaohua Li 已提交
586
	mutex_init(&kvm->lock);
587
	mutex_init(&kvm->irq_lock);
588
	mutex_init(&kvm->slots_lock);
I
Izik Eidus 已提交
589
	atomic_set(&kvm->users_count, 1);
590
	INIT_LIST_HEAD(&kvm->devices);
591 592 593 594 595

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

596
	spin_lock(&kvm_lock);
597
	list_add(&kvm->vm_list, &vm_list);
598
	spin_unlock(&kvm_lock);
599

600 601
	preempt_notifier_inc();

602
	return kvm;
603 604

out_err:
605 606
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
607
	cleanup_srcu_struct(&kvm->srcu);
608
out_err_no_srcu:
609
	hardware_disable_all();
610
out_err_no_disable:
M
Marcelo Tosatti 已提交
611 612
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
613 614
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
615
	kvm_arch_free_vm(kvm);
616
	return ERR_PTR(r);
617 618
}

619 620 621 622
/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
623
void *kvm_kvzalloc(unsigned long size)
624 625 626 627 628 629 630
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

631 632 633 634 635 636 637 638 639 640 641 642 643
static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

644 645
static void kvm_destroy_vm(struct kvm *kvm)
{
M
Marcelo Tosatti 已提交
646
	int i;
647 648
	struct mm_struct *mm = kvm->mm;

649
	kvm_arch_sync_events(kvm);
650
	spin_lock(&kvm_lock);
651
	list_del(&kvm->vm_list);
652
	spin_unlock(&kvm_lock);
653
	kvm_free_irq_routing(kvm);
M
Marcelo Tosatti 已提交
654 655
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
656
	kvm_coalesced_mmio_free(kvm);
657 658
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
659
#else
660
	kvm_arch_flush_shadow_all(kvm);
661
#endif
662
	kvm_arch_destroy_vm(kvm);
663
	kvm_destroy_devices(kvm);
664 665
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
666
	cleanup_srcu_struct(&kvm->irq_srcu);
667 668
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
669
	preempt_notifier_dec();
670
	hardware_disable_all();
671
	mmdrop(mm);
672 673
}

I
Izik Eidus 已提交
674 675 676 677 678 679 680 681 682 683 684 685 686 687
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


688 689 690 691
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

G
Gregory Haskins 已提交
692 693
	kvm_irqfd_release(kvm);

I
Izik Eidus 已提交
694
	kvm_put_kvm(kvm);
A
Avi Kivity 已提交
695 696 697
	return 0;
}

698 699
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
700
 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
701
 */
702 703
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
704
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
705

706
	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
707 708 709 710 711 712
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

713
/*
714 715 716 717
 * Insert memslot and re-sort memslots based on their GFN,
 * so binary search could be used to lookup GFN.
 * Sorting algorithm takes advantage of having initially
 * sorted array and known changed memslot position.
718
 */
719 720
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
721
{
722 723
	int id = new->id;
	int i = slots->id_to_index[id];
724
	struct kvm_memory_slot *mslots = slots->memslots;
725

726
	WARN_ON(mslots[i].id != id);
727
	if (!new->npages) {
728
		WARN_ON(!mslots[i].npages);
729 730 731 732 733 734
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}
735

736
	while (i < KVM_MEM_SLOTS_NUM - 1 &&
737 738 739
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
740 741 742 743
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}
744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
761 762
	} else
		WARN_ON_ONCE(i != slots->used_slots);
763

764 765
	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
766 767
}

768
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
769
{
X
Xiao Guangrong 已提交
770 771
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

772
#ifdef __KVM_HAVE_READONLY_MEM
X
Xiao Guangrong 已提交
773 774 775 776
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
777 778 779 780 781
		return -EINVAL;

	return 0;
}

782
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
783
		int as_id, struct kvm_memslots *slots)
784
{
785
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
786

787 788 789 790 791 792 793
	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

794
	rcu_assign_pointer(kvm->memslots[as_id], slots);
795
	synchronize_srcu_expedited(&kvm->srcu);
796

797 798 799 800 801 802 803
	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

804
	kvm_arch_memslots_updated(kvm, slots);
805 806

	return old_memslots;
807 808
}

A
Avi Kivity 已提交
809 810 811 812 813
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
814
 *
815
 * Must be called holding kvm->slots_lock for write.
A
Avi Kivity 已提交
816
 */
817
int __kvm_set_memory_region(struct kvm *kvm,
818
			    const struct kvm_userspace_memory_region *mem)
A
Avi Kivity 已提交
819
{
820
	int r;
A
Avi Kivity 已提交
821
	gfn_t base_gfn;
822
	unsigned long npages;
823
	struct kvm_memory_slot *slot;
A
Avi Kivity 已提交
824
	struct kvm_memory_slot old, new;
825
	struct kvm_memslots *slots = NULL, *old_memslots;
826
	int as_id, id;
827
	enum kvm_mr_change change;
A
Avi Kivity 已提交
828

829 830 831 832
	r = check_memory_region_flags(mem);
	if (r)
		goto out;

A
Avi Kivity 已提交
833
	r = -EINVAL;
834 835 836
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

A
Avi Kivity 已提交
837 838 839 840 841
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
842
	/* We can read the guest memory with __xxx_user() later on. */
843
	if ((id < KVM_USER_MEM_SLOTS) &&
844
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
845 846 847
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
848
		goto out;
849
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
A
Avi Kivity 已提交
850 851 852 853
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

854
	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
A
Avi Kivity 已提交
855 856 857
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

858 859 860
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

861
	new = old = *slot;
A
Avi Kivity 已提交
862

863
	new.id = id;
A
Avi Kivity 已提交
864 865 866 867
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

868 869 870 871 872
	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
873 874
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
875 876 877 878 879 880 881 882 883 884 885
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
886 887 888 889
	} else {
		if (!old.npages)
			goto out;

890
		change = KVM_MR_DELETE;
891 892 893
		new.base_gfn = 0;
		new.flags = 0;
	}
A
Avi Kivity 已提交
894

895
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
896 897
		/* Check for overlaps */
		r = -EEXIST;
898
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
899
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
900
			    (slot->id == id))
901 902 903 904 905
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
A
Avi Kivity 已提交
906 907 908 909
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
A
Al Viro 已提交
910
		new.dirty_bitmap = NULL;
A
Avi Kivity 已提交
911 912

	r = -ENOMEM;
913
	if (change == KVM_MR_CREATE) {
914
		new.userspace_addr = mem->userspace_addr;
915

916
		if (kvm_arch_create_memslot(kvm, &new, npages))
917
			goto out_free;
A
Avi Kivity 已提交
918
	}
919

A
Avi Kivity 已提交
920 921
	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
922
		if (kvm_create_dirty_bitmap(&new) < 0)
923
			goto out_free;
A
Avi Kivity 已提交
924 925
	}

926
	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
927 928
	if (!slots)
		goto out_free;
929
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
930

931
	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
932
		slot = id_to_memslot(slots, id);
933 934
		slot->flags |= KVM_MEMSLOT_INVALID;

935
		old_memslots = install_new_memslots(kvm, as_id, slots);
936

937 938
		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
939 940
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
941 942
		 *
		 * validation of sp->gfn happens in:
943 944
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
945
		 */
946
		kvm_arch_flush_shadow_memslot(kvm, slot);
947 948 949 950 951 952

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
953
		slots = old_memslots;
954
	}
955

956
	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
957
	if (r)
958
		goto out_slots;
959

960
	/* actual memory is freed via old in kvm_free_memslot below */
961
	if (change == KVM_MR_DELETE) {
962
		new.dirty_bitmap = NULL;
963
		memset(&new.arch, 0, sizeof(new.arch));
964 965
	}

966
	update_memslots(slots, &new);
967
	old_memslots = install_new_memslots(kvm, as_id, slots);
968

969
	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
970

971
	kvm_free_memslot(kvm, &old, &new);
972
	kvfree(old_memslots);
973

974 975
	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
976 977 978 979 980 981
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
982
	 */
983 984
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
985
		return r;
986 987
	}

A
Avi Kivity 已提交
988 989
	return 0;

990
out_slots:
991
	kvfree(slots);
992
out_free:
993
	kvm_free_memslot(kvm, &new, &old);
A
Avi Kivity 已提交
994 995
out:
	return r;
996
}
997 998 999
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
1000
			  const struct kvm_userspace_memory_region *mem)
1001 1002 1003
{
	int r;

1004
	mutex_lock(&kvm->slots_lock);
1005
	r = __kvm_set_memory_region(kvm, mem);
1006
	mutex_unlock(&kvm->slots_lock);
1007 1008
	return r;
}
1009 1010
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

1011 1012
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
1013
{
1014
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1015
		return -EINVAL;
1016

1017
	return kvm_set_memory_region(kvm, mem);
A
Avi Kivity 已提交
1018 1019
}

1020 1021
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
A
Avi Kivity 已提交
1022
{
1023
	struct kvm_memslots *slots;
A
Avi Kivity 已提交
1024
	struct kvm_memory_slot *memslot;
1025
	int r, i, as_id, id;
1026
	unsigned long n;
A
Avi Kivity 已提交
1027 1028 1029
	unsigned long any = 0;

	r = -EINVAL;
1030 1031 1032
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
A
Avi Kivity 已提交
1033 1034
		goto out;

1035 1036
	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
A
Avi Kivity 已提交
1037 1038 1039 1040
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

1041
	n = kvm_dirty_bitmap_bytes(memslot);
A
Avi Kivity 已提交
1042

1043
	for (i = 0; !any && i < n/sizeof(long); ++i)
A
Avi Kivity 已提交
1044 1045 1046 1047 1048 1049
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

1050 1051
	if (any)
		*is_dirty = 1;
A
Avi Kivity 已提交
1052 1053 1054 1055 1056

	r = 0;
out:
	return r;
}
1057
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
A
Avi Kivity 已提交
1058

1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty write protect them for next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
1085
	struct kvm_memslots *slots;
1086
	struct kvm_memory_slot *memslot;
1087
	int r, i, as_id, id;
1088 1089 1090 1091 1092
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
1093 1094 1095
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1096 1097
		goto out;

1098 1099
	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

1125 1126 1127 1128 1129
		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

1145 1146 1147 1148 1149
bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

1150 1151 1152 1153 1154 1155
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

1156 1157 1158 1159
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
A
Avi Kivity 已提交
1160
EXPORT_SYMBOL_GPL(gfn_to_memslot);
A
Avi Kivity 已提交
1161

1162 1163 1164 1165 1166
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

1167 1168
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
1169
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1170

1171
	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
1172 1173
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;
1174

1175
	return 1;
1176 1177 1178
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

J
Joerg Roedel 已提交
1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

X
Xiao Guangrong 已提交
1203 1204 1205 1206 1207 1208 1209
static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
I
Izik Eidus 已提交
1210
{
1211
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
X
Xiao Guangrong 已提交
1212
		return KVM_HVA_ERR_BAD;
1213

X
Xiao Guangrong 已提交
1214 1215
	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;
1216 1217 1218 1219

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

X
Xiao Guangrong 已提交
1220
	return __gfn_to_hva_memslot(slot, gfn);
I
Izik Eidus 已提交
1221
}
1222

X
Xiao Guangrong 已提交
1223 1224 1225 1226
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
I
Izik Eidus 已提交
1227
}
1228

X
Xiao Guangrong 已提交
1229
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
1230
					gfn_t gfn)
X
Xiao Guangrong 已提交
1231 1232 1233 1234 1235
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

1236 1237
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
1238
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1239
}
1240
EXPORT_SYMBOL_GPL(gfn_to_hva);
I
Izik Eidus 已提交
1241

1242 1243 1244 1245 1246 1247
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

1248
/*
1249 1250
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
1251
 */
1252 1253
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
1254
{
1255 1256 1257
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
1258 1259
		*writable = !memslot_is_readonly(slot);

1260
	return hva;
1261 1262
}

1263 1264 1265 1266 1267 1268 1269
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

1270 1271 1272 1273 1274 1275 1276
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

G
Geoff Levand 已提交
1277
static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
	unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

1288 1289 1290 1291 1292 1293 1294 1295 1296
static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

X
Xiao Guangrong 已提交
1297 1298 1299 1300 1301 1302
/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
A
Avi Kivity 已提交
1303
{
1304
	struct page *page[1];
X
Xiao Guangrong 已提交
1305
	int npages;
A
Avi Kivity 已提交
1306

X
Xiao Guangrong 已提交
1307 1308
	if (!(async || atomic))
		return false;
1309

1310 1311 1312 1313 1314 1315 1316
	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;
1317

X
Xiao Guangrong 已提交
1318 1319 1320
	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);
1321

X
Xiao Guangrong 已提交
1322 1323 1324 1325
		if (writable)
			*writable = true;
		return true;
	}
1326

X
Xiao Guangrong 已提交
1327 1328
	return false;
}
1329

X
Xiao Guangrong 已提交
1330 1331 1332 1333 1334 1335 1336 1337 1338
/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;
1339

X
Xiao Guangrong 已提交
1340 1341 1342 1343 1344 1345 1346 1347 1348 1349
	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
1350 1351 1352 1353
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
X
Xiao Guangrong 已提交
1354 1355 1356 1357
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
1358
	if (unlikely(!write_fault) && writable) {
X
Xiao Guangrong 已提交
1359 1360 1361 1362 1363 1364 1365
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
1366
		}
X
Xiao Guangrong 已提交
1367 1368

		npages = 1;
1369
	}
X
Xiao Guangrong 已提交
1370 1371 1372
	*pfn = page_to_pfn(page[0]);
	return npages;
}
I
Izik Eidus 已提交
1373

X
Xiao Guangrong 已提交
1374 1375 1376 1377
static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;
1378

X
Xiao Guangrong 已提交
1379 1380
	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;
1381

X
Xiao Guangrong 已提交
1382 1383
	return true;
}
1384

1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398
/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function need to wait IO complete if the
 *         host page is not in the memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
X
Xiao Guangrong 已提交
1399 1400 1401 1402 1403 1404
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;
1405

X
Xiao Guangrong 已提交
1406 1407
	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);
1408

X
Xiao Guangrong 已提交
1409 1410 1411 1412 1413 1414 1415 1416 1417
	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;
1418

X
Xiao Guangrong 已提交
1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432
	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
1433
		BUG_ON(!kvm_is_reserved_pfn(pfn));
X
Xiao Guangrong 已提交
1434
	} else {
X
Xiao Guangrong 已提交
1435
		if (async && vma_is_valid(vma, write_fault))
X
Xiao Guangrong 已提交
1436 1437 1438 1439 1440
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
1441
	return pfn;
1442 1443
}

1444 1445
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable)
1446
{
X
Xiao Guangrong 已提交
1447 1448 1449 1450 1451 1452
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
1453
		return KVM_PFN_NOSLOT;
X
Xiao Guangrong 已提交
1454 1455 1456 1457 1458 1459 1460 1461 1462

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
1463
}
1464
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
1465

1466 1467 1468
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
P
Paolo Bonzini 已提交
1469 1470
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
1471 1472 1473
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

1474
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1475
{
X
Xiao Guangrong 已提交
1476
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
1477
}
P
Paolo Bonzini 已提交
1478
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
1479

1480
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
1481
{
X
Xiao Guangrong 已提交
1482
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
1483
}
1484
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
1485

P
Paolo Bonzini 已提交
1486 1487 1488 1489 1490 1491
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

1492 1493 1494 1495 1496 1497
pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

P
Paolo Bonzini 已提交
1498 1499 1500 1501 1502 1503
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

1504 1505 1506 1507 1508 1509
pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

1510 1511
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
1512 1513 1514 1515
{
	unsigned long addr;
	gfn_t entry;

1516
	addr = gfn_to_hva_many(slot, gfn, &entry);
1517 1518 1519 1520 1521 1522 1523 1524 1525 1526
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

X
Xiao Guangrong 已提交
1527 1528
static struct page *kvm_pfn_to_page(pfn_t pfn)
{
1529
	if (is_error_noslot_pfn(pfn))
1530
		return KVM_ERR_PTR_BAD_PAGE;
X
Xiao Guangrong 已提交
1531

1532
	if (kvm_is_reserved_pfn(pfn)) {
1533
		WARN_ON(1);
1534
		return KVM_ERR_PTR_BAD_PAGE;
1535
	}
X
Xiao Guangrong 已提交
1536 1537 1538 1539

	return pfn_to_page(pfn);
}

1540 1541
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
1542 1543 1544 1545
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

X
Xiao Guangrong 已提交
1546
	return kvm_pfn_to_page(pfn);
A
Avi Kivity 已提交
1547 1548 1549
}
EXPORT_SYMBOL_GPL(gfn_to_page);

1550 1551 1552 1553 1554 1555 1556 1557 1558 1559
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	pfn_t pfn;

	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);

1560 1561
void kvm_release_page_clean(struct page *page)
{
1562 1563
	WARN_ON(is_error_page(page));

1564
	kvm_release_pfn_clean(page_to_pfn(page));
1565 1566 1567
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

1568 1569
void kvm_release_pfn_clean(pfn_t pfn)
{
1570
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
1571
		put_page(pfn_to_page(pfn));
1572 1573 1574
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

1575
void kvm_release_page_dirty(struct page *page)
1576
{
X
Xiao Guangrong 已提交
1577 1578
	WARN_ON(is_error_page(page));

1579 1580 1581 1582
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

1583
static void kvm_release_pfn_dirty(pfn_t pfn)
1584 1585 1586 1587 1588 1589 1590
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(pfn_t pfn)
{
1591
	if (!kvm_is_reserved_pfn(pfn)) {
1592
		struct page *page = pfn_to_page(pfn);
1593

1594 1595 1596
		if (!PageReserved(page))
			SetPageDirty(page);
	}
1597
}
1598 1599 1600 1601
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
1602
	if (!kvm_is_reserved_pfn(pfn))
1603
		mark_page_accessed(pfn_to_page(pfn));
1604 1605 1606 1607 1608
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
1609
	if (!kvm_is_reserved_pfn(pfn))
1610
		get_page(pfn_to_page(pfn));
1611 1612
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
1613

1614 1615 1616 1617 1618 1619 1620 1621
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

1622 1623
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
1624
{
1625 1626
	int r;
	unsigned long addr;
1627

1628
	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1629 1630
	if (kvm_is_error_hva(addr))
		return -EFAULT;
1631
	r = __copy_from_user(data, (void __user *)addr + offset, len);
1632
	if (r)
1633 1634 1635
		return -EFAULT;
	return 0;
}
1636 1637 1638 1639 1640 1641 1642 1643

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
1644 1645
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

1646 1647 1648 1649 1650 1651 1652 1653 1654
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

1675
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
1676 1677
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
1678
	int seg;
1679
	int offset = offset_in_page(gpa);
1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
1694

1695 1696 1697 1698 1699 1700 1701
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			           void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1702 1703
	if (kvm_is_error_hva(addr))
		return -EFAULT;
1704
	pagefault_disable();
1705
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1706
	pagefault_enable();
1707 1708 1709 1710 1711
	if (r)
		return -EFAULT;
	return 0;
}

1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
			          const void *data, int offset, int len)
1736
{
1737 1738
	int r;
	unsigned long addr;
1739

1740
	addr = gfn_to_hva_memslot(memslot, gfn);
1741 1742
	if (kvm_is_error_hva(addr))
		return -EFAULT;
1743
	r = __copy_to_user((void __user *)addr + offset, data, len);
1744
	if (r)
1745
		return -EFAULT;
1746
	mark_page_dirty_in_slot(memslot, gfn);
1747 1748
	return 0;
}
1749 1750 1751 1752 1753 1754 1755 1756

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
1757 1758
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

1759 1760 1761 1762 1763 1764 1765 1766 1767
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
1787
EXPORT_SYMBOL_GPL(kvm_write_guest);
1788

1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
		         unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

1810
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1811
			      gpa_t gpa, unsigned long len)
1812 1813 1814
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
1815 1816 1817 1818
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;
1819 1820 1821

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
1822 1823
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1824 1825
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
1826
		ghc->hva += offset;
1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
1843 1844 1845 1846 1847 1848 1849 1850 1851 1852
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

1853 1854
	BUG_ON(len > ghc->len);

1855
	if (slots->generation != ghc->generation)
1856 1857 1858 1859
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);
1860 1861 1862 1863

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

1864
	r = __copy_to_user((void __user *)ghc->hva, data, len);
1865 1866
	if (r)
		return -EFAULT;
1867
	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1868 1869 1870 1871 1872

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

1873 1874 1875 1876 1877 1878
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

1879 1880
	BUG_ON(len > ghc->len);

1881
	if (slots->generation != ghc->generation)
1882 1883 1884 1885
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);
1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

1898 1899
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
1900 1901 1902
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
1903 1904 1905 1906 1907 1908 1909 1910 1911 1912
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

1913
	while ((seg = next_segment(len, offset)) != 0) {
1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

1925
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
1926
				    gfn_t gfn)
A
Avi Kivity 已提交
1927
{
R
Rusty Russell 已提交
1928 1929
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
A
Avi Kivity 已提交
1930

1931
		set_bit_le(rel_gfn, memslot->dirty_bitmap);
A
Avi Kivity 已提交
1932 1933 1934
	}
}

1935 1936 1937 1938 1939
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
1940
	mark_page_dirty_in_slot(memslot, gfn);
1941
}
1942
EXPORT_SYMBOL_GPL(mark_page_dirty);
1943

1944 1945 1946 1947 1948 1949 1950 1951 1952
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

W
Wanpeng Li 已提交
1953 1954
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
1955
	int old, val;
W
Wanpeng Li 已提交
1956

1957
	old = val = vcpu->halt_poll_ns;
W
Wanpeng Li 已提交
1958 1959 1960 1961 1962 1963 1964
	/* 10us base */
	if (val == 0 && halt_poll_ns_grow)
		val = 10000;
	else
		val *= halt_poll_ns_grow;

	vcpu->halt_poll_ns = val;
1965
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
W
Wanpeng Li 已提交
1966 1967 1968 1969
}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
1970
	int old, val;
W
Wanpeng Li 已提交
1971

1972
	old = val = vcpu->halt_poll_ns;
W
Wanpeng Li 已提交
1973 1974 1975 1976 1977 1978
	if (halt_poll_ns_shrink == 0)
		val = 0;
	else
		val /= halt_poll_ns_shrink;

	vcpu->halt_poll_ns = val;
1979
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
W
Wanpeng Li 已提交
1980 1981
}

1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		return -EINTR;
	}
	if (kvm_cpu_has_pending_timer(vcpu))
		return -EINTR;
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

E
Eddie Dong 已提交
1996 1997 1998
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
1999
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2000
{
2001
	ktime_t start, cur;
2002
	DEFINE_WAIT(wait);
2003
	bool waited = false;
W
Wanpeng Li 已提交
2004
	u64 block_ns;
2005 2006

	start = cur = ktime_get();
W
Wanpeng Li 已提交
2007 2008
	if (vcpu->halt_poll_ns) {
		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2009

2010
		++vcpu->stat.halt_attempted_poll;
2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022
		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
			 */
			if (kvm_vcpu_check_block(vcpu) < 0) {
				++vcpu->stat.halt_successful_poll;
				goto out;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));
	}
2023

2024 2025
	kvm_arch_vcpu_blocking(vcpu);

2026 2027 2028
	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

2029
		if (kvm_vcpu_check_block(vcpu) < 0)
2030 2031
			break;

2032
		waited = true;
E
Eddie Dong 已提交
2033 2034
		schedule();
	}
2035

2036
	finish_wait(&vcpu->wq, &wait);
2037 2038
	cur = ktime_get();

2039
	kvm_arch_vcpu_unblocking(vcpu);
2040
out:
W
Wanpeng Li 已提交
2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052
	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);

	if (halt_poll_ns) {
		if (block_ns <= vcpu->halt_poll_ns)
			;
		/* we had a long block, shrink polling */
		else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
			shrink_halt_poll_ns(vcpu);
		/* we had a short halt and our poll time is too small */
		else if (vcpu->halt_poll_ns < halt_poll_ns &&
			block_ns < halt_poll_ns)
			grow_halt_poll_ns(vcpu);
2053 2054
	} else
		vcpu->halt_poll_ns = 0;
W
Wanpeng Li 已提交
2055 2056

	trace_kvm_vcpu_wakeup(block_ns, waited);
E
Eddie Dong 已提交
2057
}
2058
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
E
Eddie Dong 已提交
2059

2060
#ifndef CONFIG_S390
2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
2082
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2083
#endif /* !CONFIG_S390 */
2084

2085
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2086 2087 2088
{
	struct pid *pid;
	struct task_struct *task = NULL;
2089
	int ret = 0;
2090 2091 2092 2093

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
2094
		task = get_pid_task(pid, PIDTYPE_PID);
2095 2096
	rcu_read_unlock();
	if (!task)
2097 2098
		return ret;
	ret = yield_to(task, 1);
2099
	put_task_struct(task);
2100 2101

	return ret;
2102 2103 2104
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126
/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * Most eligible candidate to yield is decided by following heuristics:
 *
 *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
 *  (preempted lock holder), indicated by @in_spin_loop.
 *  Set at the beiginning and cleared at the end of interception/PLE handler.
 *
 *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
 *  chance last time (mostly it has become eligible now since we have probably
 *  yielded to lockholder in last iteration. This is done by toggling
 *  @dy_eligible each time a VCPU checked for eligibility.)
 *
 *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
 *  to preempted lock-holder could result in wrong VCPU selection and CPU
 *  burning. Giving priority for a potential lock-holder increases lock
 *  progress.
 *
 *  Since algorithm is based on heuristics, accessing another VCPU data without
 *  locking does not harm. It may result in trying to yield to  same VCPU, fail
 *  and continue with next VCPU and so on.
 */
2127
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2128
{
2129
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2130 2131 2132
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
2133
		    vcpu->spin_loop.dy_eligible;
2134 2135 2136 2137 2138

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
2139 2140
#else
	return true;
2141
#endif
2142
}
2143

2144
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
Z
Zhai, Edwin 已提交
2145
{
2146 2147 2148 2149
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
2150
	int try = 3;
2151 2152
	int pass;
	int i;
Z
Zhai, Edwin 已提交
2153

2154
	kvm_vcpu_set_in_spin_loop(me, true);
2155 2156 2157 2158 2159 2160 2161
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
2162
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
2163
		kvm_for_each_vcpu(i, vcpu, kvm) {
2164
			if (!pass && i <= last_boosted_vcpu) {
2165 2166 2167 2168
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
2169 2170
			if (!ACCESS_ONCE(vcpu->preempted))
				continue;
2171 2172
			if (vcpu == me)
				continue;
2173
			if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
2174
				continue;
2175 2176
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;
2177 2178 2179

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
2180 2181
				kvm->last_boosted_vcpu = i;
				break;
2182 2183 2184 2185
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
2186 2187 2188
			}
		}
	}
2189
	kvm_vcpu_set_in_spin_loop(me, false);
2190 2191 2192

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
Z
Zhai, Edwin 已提交
2193 2194 2195
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

2196
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2197 2198 2199 2200
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

2201
	if (vmf->pgoff == 0)
2202
		page = virt_to_page(vcpu->run);
A
Avi Kivity 已提交
2203
#ifdef CONFIG_X86
2204
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
2205
		page = virt_to_page(vcpu->arch.pio_data);
2206 2207 2208 2209
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
A
Avi Kivity 已提交
2210
#endif
2211
	else
2212
		return kvm_arch_vcpu_fault(vcpu, vmf);
2213
	get_page(page);
2214 2215
	vmf->page = page;
	return 0;
2216 2217
}

2218
static const struct vm_operations_struct kvm_vcpu_vm_ops = {
2219
	.fault = kvm_vcpu_fault,
2220 2221 2222 2223 2224 2225 2226 2227
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

A
Avi Kivity 已提交
2228 2229 2230 2231
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

A
Al Viro 已提交
2232
	kvm_put_kvm(vcpu->kvm);
A
Avi Kivity 已提交
2233 2234 2235
	return 0;
}

2236
static struct file_operations kvm_vcpu_fops = {
A
Avi Kivity 已提交
2237 2238
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
2239
#ifdef CONFIG_KVM_COMPAT
2240 2241
	.compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
2242
	.mmap           = kvm_vcpu_mmap,
2243
	.llseek		= noop_llseek,
A
Avi Kivity 已提交
2244 2245 2246 2247 2248 2249 2250
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
2251
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
A
Avi Kivity 已提交
2252 2253
}

2254 2255 2256
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
2257
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
2258 2259
{
	int r;
2260
	struct kvm_vcpu *vcpu, *v;
2261

2262 2263 2264
	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;

2265
	vcpu = kvm_arch_vcpu_create(kvm, id);
R
Rusty Russell 已提交
2266 2267
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);
2268

2269 2270
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

2271 2272
	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
2273
		goto vcpu_destroy;
2274

S
Shaohua Li 已提交
2275
	mutex_lock(&kvm->lock);
2276 2277 2278 2279
	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
2280 2281
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
2282
		goto unlock_vcpu_destroy;
R
Rusty Russell 已提交
2283
	}
2284

2285 2286
	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
2287
			r = -EEXIST;
2288
			goto unlock_vcpu_destroy;
2289 2290 2291
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
2292

R
Rusty Russell 已提交
2293
	/* Now it's all set up, let userspace reach it */
A
Al Viro 已提交
2294
	kvm_get_kvm(kvm);
A
Avi Kivity 已提交
2295
	r = create_vcpu_fd(vcpu);
2296 2297
	if (r < 0) {
		kvm_put_kvm(kvm);
2298
		goto unlock_vcpu_destroy;
2299 2300 2301
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
2302 2303 2304 2305 2306

	/*
	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
	 * before kvm->online_vcpu's incremented value.
	 */
2307 2308 2309 2310
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
2311
	kvm_arch_vcpu_postcreate(vcpu);
R
Rusty Russell 已提交
2312
	return r;
2313

2314
unlock_vcpu_destroy:
2315
	mutex_unlock(&kvm->lock);
2316
vcpu_destroy:
2317
	kvm_arch_vcpu_destroy(vcpu);
2318 2319 2320
	return r;
}

A
Avi Kivity 已提交
2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

A
Avi Kivity 已提交
2332 2333
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
A
Avi Kivity 已提交
2334
{
A
Avi Kivity 已提交
2335
	struct kvm_vcpu *vcpu = filp->private_data;
A
Al Viro 已提交
2336
	void __user *argp = (void __user *)arg;
2337
	int r;
2338 2339
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;
A
Avi Kivity 已提交
2340

2341 2342
	if (vcpu->kvm->mm != current->mm)
		return -EIO;
2343

2344 2345 2346
	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

2347
#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
2348 2349 2350 2351
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
2352
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
2353 2354 2355 2356
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


2357 2358 2359
	r = vcpu_load(vcpu);
	if (r)
		return r;
A
Avi Kivity 已提交
2360
	switch (ioctl) {
2361
	case KVM_RUN:
2362 2363 2364
		r = -EINVAL;
		if (arg)
			goto out;
2365 2366 2367 2368
		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
			/* The thread running this VCPU changed. */
			struct pid *oldpid = vcpu->pid;
			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
2369

2370 2371 2372 2373 2374
			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
2375
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
2376
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
A
Avi Kivity 已提交
2377 2378
		break;
	case KVM_GET_REGS: {
2379
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
2380

2381 2382 2383
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
A
Avi Kivity 已提交
2384
			goto out;
2385 2386 2387
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
A
Avi Kivity 已提交
2388
		r = -EFAULT;
2389 2390
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
A
Avi Kivity 已提交
2391
		r = 0;
2392 2393
out_free1:
		kfree(kvm_regs);
A
Avi Kivity 已提交
2394 2395 2396
		break;
	}
	case KVM_SET_REGS: {
2397
		struct kvm_regs *kvm_regs;
A
Avi Kivity 已提交
2398

2399
		r = -ENOMEM;
2400 2401 2402
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
A
Avi Kivity 已提交
2403
			goto out;
2404
		}
2405 2406
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
A
Avi Kivity 已提交
2407 2408 2409
		break;
	}
	case KVM_GET_SREGS: {
2410 2411 2412 2413 2414
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
2415 2416 2417
		if (r)
			goto out;
		r = -EFAULT;
2418
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
A
Avi Kivity 已提交
2419 2420 2421 2422 2423
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
2424 2425 2426
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
G
Guo Chao 已提交
2427
			kvm_sregs = NULL;
A
Avi Kivity 已提交
2428
			goto out;
2429
		}
2430
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
A
Avi Kivity 已提交
2431 2432
		break;
	}
2433 2434 2435 2436 2437 2438 2439
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
2440
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
2441 2442 2443 2444 2445 2446 2447 2448
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
2449
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
2450 2451 2452 2453
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
A
Avi Kivity 已提交
2454 2455 2456 2457
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
2458
		if (copy_from_user(&tr, argp, sizeof(tr)))
A
Avi Kivity 已提交
2459
			goto out;
2460
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
A
Avi Kivity 已提交
2461 2462 2463
		if (r)
			goto out;
		r = -EFAULT;
2464
		if (copy_to_user(argp, &tr, sizeof(tr)))
A
Avi Kivity 已提交
2465 2466 2467 2468
			goto out;
		r = 0;
		break;
	}
J
Jan Kiszka 已提交
2469 2470
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;
A
Avi Kivity 已提交
2471 2472

		r = -EFAULT;
2473
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
A
Avi Kivity 已提交
2474
			goto out;
J
Jan Kiszka 已提交
2475
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
A
Avi Kivity 已提交
2476 2477
		break;
	}
A
Avi Kivity 已提交
2478 2479 2480 2481 2482 2483 2484 2485 2486
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
2487
					   sizeof(kvm_sigmask)))
A
Avi Kivity 已提交
2488 2489
				goto out;
			r = -EINVAL;
2490
			if (kvm_sigmask.len != sizeof(sigset))
A
Avi Kivity 已提交
2491 2492 2493
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
2494
					   sizeof(sigset)))
A
Avi Kivity 已提交
2495 2496 2497
				goto out;
			p = &sigset;
		}
2498
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
A
Avi Kivity 已提交
2499 2500
		break;
	}
A
Avi Kivity 已提交
2501
	case KVM_GET_FPU: {
2502 2503 2504 2505 2506
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
A
Avi Kivity 已提交
2507 2508 2509
		if (r)
			goto out;
		r = -EFAULT;
2510
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
A
Avi Kivity 已提交
2511 2512 2513 2514 2515
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
2516 2517 2518
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
G
Guo Chao 已提交
2519
			fpu = NULL;
A
Avi Kivity 已提交
2520
			goto out;
2521
		}
2522
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
A
Avi Kivity 已提交
2523 2524
		break;
	}
A
Avi Kivity 已提交
2525
	default:
2526
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
2527 2528
	}
out:
2529
	vcpu_put(vcpu);
2530 2531
	kfree(fpu);
	kfree(kvm_sregs);
A
Avi Kivity 已提交
2532 2533 2534
	return r;
}

2535
#ifdef CONFIG_KVM_COMPAT
2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
2556
					   sizeof(kvm_sigmask)))
2557 2558
				goto out;
			r = -EINVAL;
2559
			if (kvm_sigmask.len != sizeof(csigset))
2560 2561 2562
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
2563
					   sizeof(csigset)))
2564
				goto out;
2565 2566 2567 2568
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

S
Scott Wood 已提交
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626
static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{
	struct kvm_device_attr attr;

	if (!accessor)
		return -EPERM;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}

static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{
	struct kvm_device *dev = filp->private_data;

	switch (ioctl) {
	case KVM_SET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
	case KVM_GET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
	case KVM_HAS_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
	default:
		if (dev->ops->ioctl)
			return dev->ops->ioctl(dev, ioctl, arg);

		return -ENOTTY;
	}
}

static int kvm_device_release(struct inode *inode, struct file *filp)
{
	struct kvm_device *dev = filp->private_data;
	struct kvm *kvm = dev->kvm;

	kvm_put_kvm(kvm);
	return 0;
}

static const struct file_operations kvm_device_fops = {
	.unlocked_ioctl = kvm_device_ioctl,
2627
#ifdef CONFIG_KVM_COMPAT
2628 2629
	.compat_ioctl = kvm_device_ioctl,
#endif
S
Scott Wood 已提交
2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640
	.release = kvm_device_release,
};

struct kvm_device *kvm_device_from_filp(struct file *filp)
{
	if (filp->f_op != &kvm_device_fops)
		return NULL;

	return filp->private_data;
}

2641
static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
2642
#ifdef CONFIG_KVM_MPIC
2643 2644
	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
2645
#endif
2646

2647
#ifdef CONFIG_KVM_XICS
2648
	[KVM_DEV_TYPE_XICS]		= &kvm_xics_ops,
A
Alex Williamson 已提交
2649
#endif
2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663
};

int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
{
	if (type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENOSPC;

	if (kvm_device_ops_table[type] != NULL)
		return -EEXIST;

	kvm_device_ops_table[type] = ops;
	return 0;
}

2664 2665 2666 2667 2668 2669
void kvm_unregister_device_ops(u32 type)
{
	if (kvm_device_ops_table[type] != NULL)
		kvm_device_ops_table[type] = NULL;
}

S
Scott Wood 已提交
2670 2671 2672 2673 2674 2675 2676 2677
static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{
	struct kvm_device_ops *ops = NULL;
	struct kvm_device *dev;
	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	int ret;

2678 2679 2680 2681 2682
	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENODEV;

	ops = kvm_device_ops_table[cd->type];
	if (ops == NULL)
S
Scott Wood 已提交
2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700
		return -ENODEV;

	if (test)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ops = ops;
	dev->kvm = kvm;

	ret = ops->create(dev, cd->type);
	if (ret < 0) {
		kfree(dev);
		return ret;
	}

2701
	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
S
Scott Wood 已提交
2702 2703 2704 2705 2706
	if (ret < 0) {
		ops->destroy(dev);
		return ret;
	}

2707
	list_add(&dev->vm_node, &kvm->devices);
S
Scott Wood 已提交
2708 2709 2710 2711 2712
	kvm_get_kvm(kvm);
	cd->fd = ret;
	return 0;
}

2713 2714 2715 2716 2717 2718 2719 2720 2721 2722
static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
	case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_CAP_SIGNAL_MSI:
#endif
2723
#ifdef CONFIG_HAVE_KVM_IRQFD
2724
	case KVM_CAP_IRQFD:
2725 2726
	case KVM_CAP_IRQFD_RESAMPLE:
#endif
2727
	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
2728 2729 2730 2731 2732
	case KVM_CAP_CHECK_EXTENSION_VM:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
2733 2734 2735 2736
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
	case KVM_CAP_MULTI_ADDRESS_SPACE:
		return KVM_ADDRESS_SPACE_NUM;
2737 2738 2739 2740 2741 2742 2743
#endif
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}

A
Avi Kivity 已提交
2744 2745 2746 2747 2748
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
2749
	int r;
A
Avi Kivity 已提交
2750

2751 2752
	if (kvm->mm != current->mm)
		return -EIO;
A
Avi Kivity 已提交
2753 2754 2755 2756
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
2757 2758 2759 2760 2761
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
2762
						sizeof(kvm_userspace_mem)))
2763 2764
			goto out;

2765
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
A
Avi Kivity 已提交
2766 2767 2768 2769 2770 2771
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
2772
		if (copy_from_user(&log, argp, sizeof(log)))
A
Avi Kivity 已提交
2773
			goto out;
2774
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
A
Avi Kivity 已提交
2775 2776
		break;
	}
2777 2778 2779
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
2780

2781
		r = -EFAULT;
2782
		if (copy_from_user(&zone, argp, sizeof(zone)))
2783 2784 2785 2786 2787 2788
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
2789

2790
		r = -EFAULT;
2791
		if (copy_from_user(&zone, argp, sizeof(zone)))
2792 2793 2794 2795 2796
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		break;
	}
#endif
G
Gregory Haskins 已提交
2797 2798 2799 2800
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
2801
		if (copy_from_user(&data, argp, sizeof(data)))
G
Gregory Haskins 已提交
2802
			goto out;
2803
		r = kvm_irqfd(kvm, &data);
G
Gregory Haskins 已提交
2804 2805
		break;
	}
G
Gregory Haskins 已提交
2806 2807 2808 2809
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
2810
		if (copy_from_user(&data, argp, sizeof(data)))
G
Gregory Haskins 已提交
2811 2812 2813 2814
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
2815 2816 2817 2818 2819
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_SIGNAL_MSI: {
		struct kvm_msi msi;

		r = -EFAULT;
2820
		if (copy_from_user(&msi, argp, sizeof(msi)))
2821 2822 2823 2824
			goto out;
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
2825 2826 2827 2828 2829 2830 2831
#endif
#ifdef __KVM_HAVE_IRQ_LINE
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
2832
		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
2833 2834
			goto out;

2835 2836
		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
					ioctl == KVM_IRQ_LINE_STATUS);
2837 2838 2839 2840 2841
		if (r)
			goto out;

		r = -EFAULT;
		if (ioctl == KVM_IRQ_LINE_STATUS) {
2842
			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
2843 2844 2845 2846 2847 2848
				goto out;
		}

		r = 0;
		break;
	}
2849
#endif
2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
2875
out_free_irq_routing:
2876 2877 2878 2879
		vfree(entries);
		break;
	}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
S
Scott Wood 已提交
2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897
	case KVM_CREATE_DEVICE: {
		struct kvm_create_device cd;

		r = -EFAULT;
		if (copy_from_user(&cd, argp, sizeof(cd)))
			goto out;

		r = kvm_ioctl_create_device(kvm, &cd);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(argp, &cd, sizeof(cd)))
			goto out;

		r = 0;
		break;
	}
2898 2899 2900
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
		break;
2901
	default:
2902
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2903 2904 2905 2906 2907
	}
out:
	return r;
}

2908
#ifdef CONFIG_KVM_COMPAT
2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

2952
static struct file_operations kvm_vm_fops = {
2953 2954
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
2955
#ifdef CONFIG_KVM_COMPAT
2956 2957
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
2958
	.llseek		= noop_llseek,
2959 2960
};

2961
static int kvm_dev_ioctl_create_vm(unsigned long type)
2962
{
2963
	int r;
2964 2965
	struct kvm *kvm;

2966
	kvm = kvm_create_vm(type);
2967 2968
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
2969 2970 2971 2972 2973 2974 2975
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
2976
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
2977
	if (r < 0)
A
Al Viro 已提交
2978
		kvm_put_kvm(kvm);
2979

2980
	return r;
2981 2982 2983 2984 2985
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
2986
	long r = -EINVAL;
2987 2988 2989

	switch (ioctl) {
	case KVM_GET_API_VERSION:
2990 2991
		if (arg)
			goto out;
2992 2993 2994
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
2995
		r = kvm_dev_ioctl_create_vm(arg);
2996
		break;
2997
	case KVM_CHECK_EXTENSION:
2998
		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
2999
		break;
3000 3001 3002
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
3003 3004 3005
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
3006 3007 3008
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
3009
#endif
3010
		break;
3011 3012 3013
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
3014
		r = -EOPNOTSUPP;
3015
		break;
A
Avi Kivity 已提交
3016
	default:
3017
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
A
Avi Kivity 已提交
3018 3019 3020 3021 3022 3023 3024 3025
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
3026
	.llseek		= noop_llseek,
A
Avi Kivity 已提交
3027 3028 3029
};

static struct miscdevice kvm_dev = {
A
Avi Kivity 已提交
3030
	KVM_MINOR,
A
Avi Kivity 已提交
3031 3032 3033 3034
	"kvm",
	&kvm_chardev_ops,
};

3035
static void hardware_enable_nolock(void *junk)
3036 3037
{
	int cpu = raw_smp_processor_id();
3038
	int r;
3039

3040
	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
3041
		return;
3042

3043
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
3044

3045
	r = kvm_arch_hardware_enable();
3046 3047 3048 3049

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
X
Xiubo Li 已提交
3050
		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
3051
	}
3052 3053
}

3054
static void hardware_enable(void)
3055
{
3056
	raw_spin_lock(&kvm_count_lock);
3057 3058
	if (kvm_usage_count)
		hardware_enable_nolock(NULL);
3059
	raw_spin_unlock(&kvm_count_lock);
3060 3061 3062
}

static void hardware_disable_nolock(void *junk)
3063 3064 3065
{
	int cpu = raw_smp_processor_id();

3066
	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
3067
		return;
3068
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3069
	kvm_arch_hardware_disable();
3070 3071
}

3072
static void hardware_disable(void)
3073
{
3074
	raw_spin_lock(&kvm_count_lock);
3075 3076
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
3077
	raw_spin_unlock(&kvm_count_lock);
3078 3079
}

3080 3081 3082 3083 3084 3085
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
3086
		on_each_cpu(hardware_disable_nolock, NULL, 1);
3087 3088 3089 3090
}

static void hardware_disable_all(void)
{
3091
	raw_spin_lock(&kvm_count_lock);
3092
	hardware_disable_all_nolock();
3093
	raw_spin_unlock(&kvm_count_lock);
3094 3095 3096 3097 3098 3099
}

static int hardware_enable_all(void)
{
	int r = 0;

3100
	raw_spin_lock(&kvm_count_lock);
3101 3102 3103 3104

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
3105
		on_each_cpu(hardware_enable_nolock, NULL, 1);
3106 3107 3108 3109 3110 3111 3112

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

3113
	raw_spin_unlock(&kvm_count_lock);
3114 3115 3116 3117

	return r;
}

A
Avi Kivity 已提交
3118 3119 3120
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
3121
	val &= ~CPU_TASKS_FROZEN;
A
Avi Kivity 已提交
3122
	switch (val) {
3123
	case CPU_DYING:
3124
		hardware_disable();
3125
		break;
3126
	case CPU_STARTING:
3127
		hardware_enable();
A
Avi Kivity 已提交
3128 3129 3130 3131 3132
		break;
	}
	return NOTIFY_OK;
}

3133
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
M
Mike Day 已提交
3134
		      void *v)
3135
{
3136 3137 3138 3139 3140 3141
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT required VMX off for all cpu when system shutdown.
	 */
X
Xiubo Li 已提交
3142
	pr_info("kvm: exiting hardware virtualization\n");
3143
	kvm_rebooting = true;
3144
	on_each_cpu(hardware_disable_nolock, NULL, 1);
3145 3146 3147 3148 3149 3150 3151 3152
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

M
Marcelo Tosatti 已提交
3153
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3154 3155 3156 3157
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
3158
		struct kvm_io_device *pos = bus->range[i].dev;
3159 3160 3161

		kvm_iodevice_destructor(pos);
	}
M
Marcelo Tosatti 已提交
3162
	kfree(bus);
3163 3164
}

3165
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
X
Xiubo Li 已提交
3166
				 const struct kvm_io_range *r2)
3167
{
J
Jason Wang 已提交
3168 3169 3170 3171
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
3172
		return -1;
J
Jason Wang 已提交
3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184

	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * accept any overlapping write.  Any order is acceptable for
	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
	 * we process all of them.
	 */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
3185
		return 1;
J
Jason Wang 已提交
3186

3187 3188 3189
	return 0;
}

3190 3191
static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
3192
	return kvm_io_bus_cmp(p1, p2);
3193 3194
}

G
Geoff Levand 已提交
3195
static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209
			  gpa_t addr, int len)
{
	bus->range[bus->dev_count++] = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
		kvm_io_bus_sort_cmp, NULL);

	return 0;
}

G
Geoff Levand 已提交
3210
static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227
			     gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

3228
	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
3229 3230 3231 3232 3233
		off--;

	return off;
}

3234
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
C
Cornelia Huck 已提交
3235 3236 3237 3238 3239 3240 3241 3242 3243
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
3244
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3245
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
C
Cornelia Huck 已提交
3246 3247 3248 3249 3250 3251 3252 3253
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

3254
/* kvm_io_bus_write - called under kvm->slots_lock */
3255
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3256
		     int len, const void *val)
3257
{
3258
	struct kvm_io_bus *bus;
3259
	struct kvm_io_range range;
C
Cornelia Huck 已提交
3260
	int r;
3261 3262 3263 3264 3265

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};
3266

3267 3268
	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
C
Cornelia Huck 已提交
3269 3270 3271 3272
	return r < 0 ? r : 0;
}

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
3273 3274
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
C
Cornelia Huck 已提交
3275 3276 3277 3278 3279 3280 3281 3282 3283
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

3284
	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
C
Cornelia Huck 已提交
3285 3286 3287

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
3288
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
3289
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
C
Cornelia Huck 已提交
3290 3291 3292 3293 3294 3295 3296
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
3297
	return __kvm_io_bus_write(vcpu, bus, &range, val);
C
Cornelia Huck 已提交
3298 3299
}

3300 3301
static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
C
Cornelia Huck 已提交
3302 3303 3304 3305
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3306 3307 3308 3309
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
3310
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3311
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
C
Cornelia Huck 已提交
3312 3313
				       range->len, val))
			return idx;
3314 3315 3316
		idx++;
	}

3317 3318
	return -EOPNOTSUPP;
}
3319
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
3320

3321
/* kvm_io_bus_read - called under kvm->slots_lock */
3322
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
M
Marcelo Tosatti 已提交
3323
		    int len, void *val)
3324
{
3325
	struct kvm_io_bus *bus;
3326
	struct kvm_io_range range;
C
Cornelia Huck 已提交
3327
	int r;
3328 3329 3330 3331 3332

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};
M
Marcelo Tosatti 已提交
3333

3334 3335
	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
C
Cornelia Huck 已提交
3336 3337
	return r < 0 ? r : 0;
}
3338

3339

3340
/* Caller must hold slots_lock. */
3341 3342
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
3343
{
M
Marcelo Tosatti 已提交
3344
	struct kvm_io_bus *new_bus, *bus;
3345

M
Marcelo Tosatti 已提交
3346
	bus = kvm->buses[bus_idx];
3347 3348
	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3349
		return -ENOSPC;
3350

3351
	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
3352
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
M
Marcelo Tosatti 已提交
3353 3354
	if (!new_bus)
		return -ENOMEM;
3355 3356
	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
	       sizeof(struct kvm_io_range)));
3357
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
M
Marcelo Tosatti 已提交
3358 3359 3360
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
3361 3362 3363 3364

	return 0;
}

3365
/* Caller must hold slots_lock. */
M
Marcelo Tosatti 已提交
3366 3367
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
3368
{
M
Marcelo Tosatti 已提交
3369 3370
	int i, r;
	struct kvm_io_bus *new_bus, *bus;
3371

3372
	bus = kvm->buses[bus_idx];
M
Marcelo Tosatti 已提交
3373
	r = -ENOENT;
3374 3375
	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
M
Marcelo Tosatti 已提交
3376
			r = 0;
3377 3378
			break;
		}
M
Marcelo Tosatti 已提交
3379

3380
	if (r)
M
Marcelo Tosatti 已提交
3381
		return r;
3382

3383
	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3384 3385 3386 3387 3388 3389 3390 3391
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
M
Marcelo Tosatti 已提交
3392 3393 3394 3395 3396

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
3397 3398
}

A
Avi Kivity 已提交
3399 3400 3401 3402
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

3403
static int vm_stat_get(void *_offset, u64 *val)
3404 3405 3406 3407
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

3408
	*val = 0;
3409
	spin_lock(&kvm_lock);
3410
	list_for_each_entry(kvm, &vm_list, vm_list)
3411
		*val += *(u32 *)((void *)kvm + offset);
3412
	spin_unlock(&kvm_lock);
3413
	return 0;
3414 3415 3416 3417
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

3418
static int vcpu_stat_get(void *_offset, u64 *val)
A
Avi Kivity 已提交
3419 3420 3421 3422 3423 3424
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

3425
	*val = 0;
3426
	spin_lock(&kvm_lock);
A
Avi Kivity 已提交
3427
	list_for_each_entry(kvm, &vm_list, vm_list)
3428 3429 3430
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

3431
	spin_unlock(&kvm_lock);
3432
	return 0;
A
Avi Kivity 已提交
3433 3434
}

3435 3436
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

3437
static const struct file_operations *stat_fops[] = {
3438 3439 3440
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
A
Avi Kivity 已提交
3441

3442
static int kvm_init_debug(void)
A
Avi Kivity 已提交
3443
{
3444
	int r = -EEXIST;
A
Avi Kivity 已提交
3445 3446
	struct kvm_stats_debugfs_item *p;

3447
	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
3448 3449 3450 3451
	if (kvm_debugfs_dir == NULL)
		goto out;

	for (p = debugfs_entries; p->name; ++p) {
3452
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
A
Avi Kivity 已提交
3453
						(void *)(long)p->offset,
3454
						stat_fops[p->kind]);
3455 3456 3457 3458 3459 3460 3461 3462 3463 3464
		if (p->dentry == NULL)
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
A
Avi Kivity 已提交
3465 3466 3467 3468 3469 3470 3471 3472
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
3473
	debugfs_remove(kvm_debugfs_dir);
A
Avi Kivity 已提交
3474 3475
}

3476
static int kvm_suspend(void)
3477
{
3478
	if (kvm_usage_count)
3479
		hardware_disable_nolock(NULL);
3480 3481 3482
	return 0;
}

3483
static void kvm_resume(void)
3484
{
3485
	if (kvm_usage_count) {
3486
		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
3487
		hardware_enable_nolock(NULL);
3488
	}
3489 3490
}

3491
static struct syscore_ops kvm_syscore_ops = {
3492 3493 3494 3495
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

3496 3497 3498 3499 3500 3501 3502 3503 3504
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3505

3506 3507
	if (vcpu->preempted)
		vcpu->preempted = false;
3508

R
Radim Krčmář 已提交
3509 3510
	kvm_arch_sched_in(vcpu, cpu);

3511
	kvm_arch_vcpu_load(vcpu, cpu);
3512 3513 3514 3515 3516 3517 3518
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

3519 3520
	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
3521
	kvm_arch_vcpu_put(vcpu);
3522 3523
}

3524
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3525
		  struct module *module)
A
Avi Kivity 已提交
3526 3527
{
	int r;
Y
Yang, Sheng 已提交
3528
	int cpu;
A
Avi Kivity 已提交
3529

3530 3531
	r = kvm_arch_init(opaque);
	if (r)
3532
		goto out_fail;
3533

3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544
	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like intel and amd on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already setup for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

3545
	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3546 3547 3548 3549
		r = -ENOMEM;
		goto out_free_0;
	}

3550
	r = kvm_arch_hardware_setup();
A
Avi Kivity 已提交
3551
	if (r < 0)
3552
		goto out_free_0a;
A
Avi Kivity 已提交
3553

Y
Yang, Sheng 已提交
3554 3555
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
3556
				kvm_arch_check_processor_compat,
3557
				&r, 1);
Y
Yang, Sheng 已提交
3558
		if (r < 0)
3559
			goto out_free_1;
Y
Yang, Sheng 已提交
3560 3561
	}

A
Avi Kivity 已提交
3562 3563
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
3564
		goto out_free_2;
A
Avi Kivity 已提交
3565 3566
	register_reboot_notifier(&kvm_reboot_notifier);

3567
	/* A kmem cache lets us meet the alignment requirements of fx_save. */
3568 3569 3570
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
J
Joe Perches 已提交
3571
					   0, NULL);
3572 3573
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
3574
		goto out_free_3;
3575 3576
	}

3577 3578 3579 3580
	r = kvm_async_pf_init();
	if (r)
		goto out_free;

A
Avi Kivity 已提交
3581
	kvm_chardev_ops.owner = module;
3582 3583
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
A
Avi Kivity 已提交
3584 3585 3586

	r = misc_register(&kvm_dev);
	if (r) {
X
Xiubo Li 已提交
3587
		pr_err("kvm: misc device register failed\n");
3588
		goto out_unreg;
A
Avi Kivity 已提交
3589 3590
	}

3591 3592
	register_syscore_ops(&kvm_syscore_ops);

3593 3594 3595
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

3596 3597
	r = kvm_init_debug();
	if (r) {
X
Xiubo Li 已提交
3598
		pr_err("kvm: create debugfs files failed\n");
3599 3600
		goto out_undebugfs;
	}
3601

P
Paolo Bonzini 已提交
3602 3603 3604
	r = kvm_vfio_ops_init();
	WARN_ON(r);

3605
	return 0;
A
Avi Kivity 已提交
3606

3607 3608
out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
3609
	misc_deregister(&kvm_dev);
3610 3611
out_unreg:
	kvm_async_pf_deinit();
A
Avi Kivity 已提交
3612
out_free:
3613
	kmem_cache_destroy(kvm_vcpu_cache);
3614
out_free_3:
A
Avi Kivity 已提交
3615
	unregister_reboot_notifier(&kvm_reboot_notifier);
A
Avi Kivity 已提交
3616
	unregister_cpu_notifier(&kvm_cpu_notifier);
3617 3618
out_free_2:
out_free_1:
3619
	kvm_arch_hardware_unsetup();
3620 3621
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
3622
out_free_0:
3623 3624
	kvm_irqfd_exit();
out_irqfd:
3625 3626
	kvm_arch_exit();
out_fail:
A
Avi Kivity 已提交
3627 3628
	return r;
}
3629
EXPORT_SYMBOL_GPL(kvm_init);
A
Avi Kivity 已提交
3630

3631
void kvm_exit(void)
A
Avi Kivity 已提交
3632
{
3633
	kvm_exit_debug();
A
Avi Kivity 已提交
3634
	misc_deregister(&kvm_dev);
3635
	kmem_cache_destroy(kvm_vcpu_cache);
3636
	kvm_async_pf_deinit();
3637
	unregister_syscore_ops(&kvm_syscore_ops);
A
Avi Kivity 已提交
3638
	unregister_reboot_notifier(&kvm_reboot_notifier);
3639
	unregister_cpu_notifier(&kvm_cpu_notifier);
3640
	on_each_cpu(hardware_disable_nolock, NULL, 1);
3641
	kvm_arch_hardware_unsetup();
3642
	kvm_arch_exit();
3643
	kvm_irqfd_exit();
3644
	free_cpumask_var(cpus_hardware_enabled);
3645
	kvm_vfio_ops_exit();
A
Avi Kivity 已提交
3646
}
3647
EXPORT_SYMBOL_GPL(kvm_exit);