/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;

static struct page *fault_page;
static pfn_t fault_pfn;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

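/*
 * Set the given request bit on every vcpu, then IPI (with ack_flush as a
 * no-op handler) only the cpus that are currently running a vcpu, so
 * smp_call_function_many() waits until each target has left guest mode.
 * Sleeping vcpus pick the request up on their next entry.  Returns true
 * if an IPI was actually sent.
 */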
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_make_check_request(req, vcpu))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

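/*
 * tlbs_dirty is sampled before the flush request is raised; the final
 * cmpxchg() resets it to zero only if it has not moved on in the
 * meantime, so sptes dirtied while the flush was in flight still get a
 * flush of their own later.
 */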
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

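/*
 * MMU notifier callbacks keep the shadow page tables in sync with the
 * host page tables: mmu_notifier_seq is bumped whenever a host pte is
 * about to go away, and mmu_notifier_count is non-zero while a range
 * invalidation is in progress.  The kvm page fault path rereads both
 * under mmu_lock before installing an spte, as the comments below
 * explain in detail.
 */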
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	need_tlb_flush |= kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

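/*
 * kvm_create_vm() builds a VM: arch state, hardware virtualization on
 * first use (hardware_enable_all()), the per-VM SRCU domain, the empty
 * memslot array and the I/O buses, and registers the MMU notifier.
 * Everything allocated here is torn down again in kvm_destroy_vm().
 */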
static struct kvm *kvm_create_vm(void)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm);
	if (r)
		goto out_err_nodisable;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err_nosrcu;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_nosrcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
		vfree(memslot->dirty_bitmap_head);
	else
		kfree(memslot->dirty_bitmap_head);

	memslot->dirty_bitmap = NULL;
	memslot->dirty_bitmap_head = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * This makes it possible to do double buffering: see x86's
 * kvm_vm_ioctl_get_dirty_log().
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	if (dirty_bytes > PAGE_SIZE)
		memslot->dirty_bitmap = vzalloc(dirty_bytes);
	else
		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);

	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vzalloc(npages * sizeof(*new.rmap));

		if (!new.rmap)
			goto out_free;

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + ((base_gfn + npages - 1)
			     >> KVM_HPAGE_GFN_SHIFT(level));
		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);

		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

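	/*
	 * Deleting a slot is done in two phases: first the slot is
	 * marked KVM_MEMSLOT_INVALID in a copy of the memslot array
	 * that is published with rcu_assign_pointer(), then
	 * synchronize_srcu_expedited() waits for all SRCU readers of
	 * the old array to drain before the shadow pages are flushed
	 * and the old array is freed.
	 */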
	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->generation++;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;
	slots->generation++;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

int is_fault_pfn(pfn_t pfn)
{
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
						gfn_t gfn)
{
	int i;

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return gfn_to_hva_memslot(slot, gfn);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

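/*
 * hva_to_pfn() translates a host virtual address into a pinned pfn.
 * With "atomic" set only the non-sleeping __get_user_pages_fast() path
 * is tried and fault_pfn is returned on failure.  If "async" is given
 * and the page cannot be brought in here, *async is set so the caller
 * can queue an asynchronous page fault instead of blocking the vcpu.
 */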
static pfn_t get_fault_pfn(void)
{
	get_page(fault_page);
	return fault_pfn;
}

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			bool *async, bool write_fault, bool *writable)
{
	struct page *page[1];
	int npages = 0;
	pfn_t pfn;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

	if (atomic || async)
		npages = __get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1) && !atomic) {
		might_sleep();

		if (writable)
			*writable = write_fault;

		npages = get_user_pages_fast(addr, 1, write_fault, page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			npages = 1;
		}
	}

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (atomic)
			return get_fault_pfn();

		down_read(&current->mm->mmap_sem);
		if (is_hwpoison_address(addr)) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
			pfn = get_fault_pfn();
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
			pfn = get_fault_pfn();
		}
		up_read(&current->mm->mmap_sem);
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	unsigned long addr;

	if (async)
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
}

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
								  int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

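/*
 * A gfn_to_hva_cache memoizes one gpa -> hva translation together with
 * the memslot generation it was computed under; kvm_write_guest_cached()
 * revalidates the cache whenever slots->generation has moved on, so
 * stale translations are never used after a memslot update.
 */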
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->memslot = __gfn_to_memslot(slots, gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva))
		ghc->hva += offset;
	else
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

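/*
 * Dirty logging: set the bit for this gfn in the slot's little-endian
 * dirty bitmap (allocated double-sized in kvm_create_dirty_bitmap() so
 * that the harvesting side can double-buffer, see
 * kvm_vm_ioctl_get_dirty_log()).
 */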
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

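/*
 * Backs the vcpu fd's mmap(): page 0 is the kvm_run structure, and the
 * optional KVM_PIO_PAGE_OFFSET / KVM_COALESCED_MMIO_PAGE_OFFSET pages
 * map the pio data page and the coalesced MMIO ring, matching the size
 * reported by KVM_GET_VCPU_MMAP_SIZE below.
 */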
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

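/*
 * Note the publication order below: the vcpu pointer is stored in
 * kvm->vcpus[] before online_vcpus is bumped, with an smp_wmb() in
 * between, so lockless readers that iterate up to online_vcpus never
 * see an uninitialized slot.
 */
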
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

1926
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1927
{
1928 1929 1930 1931
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
1932 1933
	struct kvm *kvm = vma->vm_file->private_data;

1934 1935
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
1936
		return VM_FAULT_SIGBUS;
1937 1938 1939 1940

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
1941
		return VM_FAULT_SIGBUS;
1942 1943

	vmf->page = page[0];
1944
	return 0;
1945 1946
}

1947
static const struct vm_operations_struct kvm_vm_vm_ops = {
1948
	.fault = kvm_vm_fault,
1949 1950 1951 1952 1953 1954 1955 1956
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

1957
static struct file_operations kvm_vm_fops = {
1958 1959
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
1960 1961 1962
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
1963
	.mmap           = kvm_vm_mmap,
1964
	.llseek		= noop_llseek,
1965 1966 1967 1968
};

static int kvm_dev_ioctl_create_vm(void)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}
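
/*
 * Usage sketch (error handling omitted): the userspace sequence that
 * lands in kvm_dev_ioctl_create_vm() and receives the anonymous
 * "kvm-vm" file descriptor created above.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */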

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
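
/*
 * Usage sketch (kvm_fd is hypothetical): capabilities are probed
 * per-extension; a positive return means supported, and
 * KVM_CAP_IRQ_ROUTING reports the routing table limit rather than a
 * plain boolean.
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) > 0)
 *		... KVM_SET_USER_MEMORY_REGION may be used ...
 */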

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
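
/*
 * Usage sketch (vcpu_fd is hypothetical): KVM_GET_VCPU_MMAP_SIZE tells
 * userspace how much to map on a vcpu fd -- the kvm_run page plus the
 * optional pio and coalesced-mmio pages accounted for above.
 *
 *	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */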

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	spin_unlock(&kvm_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	spin_unlock(&kvm_lock);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}
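
/*
 * Usage sketch: the two helpers above form a refcounted pair, so
 * VT-x/SVM is enabled on all cpus when the first VM appears and
 * disabled again when the last one is destroyed:
 *
 *	r = hardware_enable_all();	// VM creation
 *	...
 *	hardware_disable_all();		// VM destruction
 */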

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_STARTING:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		hardware_enable(NULL);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting) {
		/* spin while reset goes on */
		local_irq_enable();
		while (true)
			cpu_relax();
	}
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}
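
/*
 * Usage sketch (mydev is a hypothetical kvm_io_device): registration
 * publishes a copied bus array with rcu_assign_pointer() and waits out
 * SRCU readers before freeing the old one, so concurrent
 * kvm_io_bus_write()/read() callers never observe a half-updated bus.
 *
 *	mutex_lock(&kvm->slots_lock);
 *	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &mydev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */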

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
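
/*
 * Usage note (illustrative): the entries created above appear under
 * debugfs, typically as /sys/kernel/debug/kvm/<name>; reading one sums
 * the counter over all VMs (vm_stat_get) or over every vcpu of every
 * VM (vcpu_stat_get).
 */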

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count) {
		WARN_ON(spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
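
/*
 * Usage sketch (shape only, loosely after the x86 VMX module): each
 * arch module passes its vcpu size/alignment so the "kvm_vcpu" cache
 * above fits the containing arch vcpu structure.
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 */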

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(fault_page);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);