/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
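/*
 * For example, with 4KB pages (PAGE_SHIFT == 12), PT64_INDEX() decomposes
 * a 64-bit address as
 *
 *	PT64_INDEX(addr, 4) == addr[47:39]	(PML4 index)
 *	PT64_INDEX(addr, 3) == addr[38:30]	(PDPT index)
 *	PT64_INDEX(addr, 2) == addr[29:21]	(page directory index)
 *	PT64_INDEX(addr, 1) == addr[20:12]	(page table index)
 *
 * while PT32_INDEX() yields the two 10-bit indices addr[31:22] and
 * addr[21:12] of the 32-bit format.
 */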


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

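/*
 * A shadow pte is updated with a single 64-bit store: the hardware page
 * walker may inspect the pte concurrently, so a torn write on a 32-bit
 * host could briefly expose a half-updated entry.  set_64bit() provides
 * the atomic store; the casts only adapt to its per-arch prototype.
 */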
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}
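
/*
 * Callers invoke mmu_topup_memory_caches() before entering paths that
 * must not fail memory allocation (the page fault and pte write paths);
 * once the caches are full, those paths draw objects through
 * mmu_memory_cache_alloc(), which cannot fail and never calls into the
 * page allocator.
 */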

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}
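
/*
 * For example, for a memslot with base_gfn 0x1000 that contains gfn
 * 0x1005, gfn_to_rmap() returns &slot->rmap[5].
 */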

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
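
/*
 * For example, adding sptes a, b and c for the same gfn steps rmapp
 * through these states:
 *
 *	initially	*rmapp == 0
 *	rmap_add(a)	*rmapp == (unsigned long)a
 *	rmap_add(b)	*rmapp == (unsigned long)desc | 1,
 *			desc->shadow_ptes == { a, b }
 *	rmap_add(c)	desc->shadow_ptes == { a, b, c }
 *
 * Only when all RMAP_EXT slots of a descriptor are in use is a further
 * descriptor chained via desc->more.
 */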
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	while (*rmapp) {
		if (!(*rmapp & 1))
			spte = (u64 *)*rmapp;
		else {
			desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			spte = desc->shadow_ptes[0];
		}
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON(!(*spte & PT_WRITABLE_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
	}
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}
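
/*
 * The quadrant distinguishes the shadow pages that back a single 32-bit
 * guest page table: a guest table holds 1024 entries while a shadow
 * (64-bit format) table holds only 512, so each guest page table is
 * shadowed by two shadow pages (quadrant 0 or 1) and each guest page
 * directory by four (quadrants 0-3).  Keeping the quadrant in the role
 * keeps those shadows distinct in the hash.
 */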

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlocks.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	} else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(kvm, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return gpa | HPA_ERR_MASK;
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			pte = table[index];
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
				return 0;
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
			       u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr))
		return 1;

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
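			/*
			 * Example: with 8-byte ptes (pte_size == 8), a
			 * 4-byte write at offset 4 stays within one pte,
			 * (4 ^ 7) & ~7 == 0, while the same write at
			 * offset 6 straddles two ptes and is flagged:
			 * (6 ^ 9) & ~7 == 8.
			 */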
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
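			/*
			 * Example: a write to byte offset 0x10 of a guest
			 * page directory modifies guest pde 4, covering
			 * gvas 0x01000000-0x013fffff; that range is
			 * mapped by shadow pdes 8 and 9, so page_offset
			 * becomes 0x40 (spte 8) and npte becomes 2.
			 */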
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK) {
				rmap_remove(kvm, &pt[i]);
				pt[i] &= ~PT_WRITABLE_MASK;
			}
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);

		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif