/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#define pgprintk(x...) do { printk(x); } while (0)
#define rmap_printk(x...) do { printk(x); } while (0)

#define ASSERT(x)							\
	do {								\
		if (!(x))						\
			printk(KERN_WARNING				\
			       "assertion failed %s:%d: %s\n",		\
			       __FILE__, __LINE__, #x);			\
	} while (0)

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
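
/*
 * Added note: under PSE36 a 4MB pde carries physical address bits 32-35
 * in pde bits 13-16, so the high base-address bits can be recovered as
 * ((pde & PT32_DIR_PSE36_MASK) >> PT32_DIR_PSE36_SHIFT) << 32.
 */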


#define PT32_PTE_COPY_MASK \
	(PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
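
/*
 * Added note: shifting the guest access bits left by
 * PT_SHADOW_BITS_OFFSET (9) stashes them in the software-available pte
 * bits: PT_WRITABLE_MASK (bit 1) lands on PT_SHADOW_WRITABLE_MASK
 * (bit 10) and PT_USER_MASK (bit 2) on PT_SHADOW_USER_MASK (bit 11);
 * see set_pte_common() below.
 */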

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
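
/*
 * Worked example (added): with 4KB pages, PT64_LEVEL_SHIFT(2) is
 * 12 + 9 = 21, so PT64_INDEX(addr, 2) extracts address bits 21-29, the
 * index into the 512-entry second-level table.
 */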


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
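
/*
 * Added note: only present and writable ptes are reverse mapped; the
 * rmap exists so that rmap_write_protect() can find and strip the
 * writable bit from every shadow mapping of a guest page-table page.
 */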

static void mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				   size_t objsize, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kzalloc(objsize, GFP_NOWAIT);
		if (!obj)
			BUG();
		cache->objects[cache->nobjs++] = obj;
	}
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static void mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
			       sizeof(struct kvm_pte_chain), 4);
	mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
			       sizeof(struct kvm_rmap_desc), 1);
}
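
/*
 * Usage note (added): the fault path calls mmu_topup_memory_caches()
 * before it starts walking and modifying the shadow page tables, so the
 * mmu_alloc_pte_chain()/mmu_alloc_rmap_desc() calls made during the
 * walk simply pop a pre-allocated object and cannot fail.
 */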

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
{
	if (mc->nobjs < KVM_NR_MEM_OBJS)
		mc->objects[mc->nobjs++] = obj;
	else
		kfree(obj);
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
			       struct kvm_pte_chain *pc)
{
	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
			       struct kvm_rmap_desc *rd)
{
	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
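
/*
 * A decoding sketch (added for illustration; this helper is not used
 * elsewhere in this file): fetch the first shadow pte that maps a page
 * under the encoding described above.
 */
static inline u64 *rmap_first_spte(struct page *page)
{
	struct kvm_rmap_desc *desc;

	if (!page->private)
		return NULL;			/* no rmap entries */
	if (!(page->private & 1))
		return (u64 *)page->private;	/* single spte pointer */
	desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
	return desc->shadow_ptes[0];		/* head of descriptor list */
}
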
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		page->private = (unsigned long)spte;
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)page->private;
		desc->shadow_ptes[1] = spte;
		page->private = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
				   struct page *page,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = 0;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		page->private = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			page->private = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(vcpu, desc);
}

static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)page->private != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		page->private = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(vcpu, page,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page;
	struct kvm_memory_slot *slot;
	struct kvm_rmap_desc *desc;
	u64 *spte;

	slot = gfn_to_memslot(kvm, gfn);
	BUG_ON(!slot);
	page = gfn_to_page(slot, gfn);

	while (page->private) {
		if (!(page->private & 1))
			spte = (u64 *)page->private;
		else {
			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
			spte = desc->shadow_ptes[0];
		}
		BUG_ON(!spte);
		BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
		       page_to_pfn(page) << PAGE_SHIFT);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON(!(*spte & PT_WRITABLE_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		rmap_remove(vcpu, spte);
		*spte &= ~(u64)PT_WRITABLE_MASK;
	}
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
	u64 *pos;
	u64 *end;

	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
		      pos != end; pos++)
		if (*pos != 0) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
	struct kvm_mmu_page *page_head = page_header(page_hpa);

	ASSERT(is_empty_shadow_page(page_hpa));
	list_del(&page_head->link);
	page_head->page_hpa = page_hpa;
	list_add(&page_head->link, &vcpu->free_pages);
	++vcpu->kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (list_empty(&vcpu->free_pages))
		return NULL;

	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
	list_del(&page->link);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->page_hpa));
	page->slot_bitmap = 0;
	page->global = 1;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
				       struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(vcpu, pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
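	/*
	 * Added note: a 32-bit guest page table holds 1024 entries where
	 * a shadow page holds 512, so each guest page is shadowed by two
	 * pages at level 1 (four at the level-2 root); role.quadrant
	 * records which portion this shadow page covers.
	 */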
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu, gfn);
	return page;
}

static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = __va(page->page_hpa);

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (pt[i] & PT_PRESENT_MASK)
				rmap_remove(vcpu, &pt[i]);
			pt[i] = 0;
		}
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = 0;
		if (!(ent & PT_PRESENT_MASK))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
	}
}

static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
}

static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(vcpu, page, parent_pte);
		*parent_pte = 0;
	}
	kvm_mmu_page_unlink_children(vcpu, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(vcpu, page->page_hpa);
	} else {
		list_del(&page->link);
		list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	}
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(vcpu, page);
			r = 1;
		}
	return r;
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_memory_slot *slot;
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!slot)
		return gpa | HPA_ERR_MASK;
	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
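
/*
 * Added comment: nonpaging_map() below walks the shadow table top-down
 * for address v (guest-virtual equals guest-physical here, since the
 * guest runs with paging disabled), allocating missing interior levels
 * as metaphysical pages keyed by pseudo_gfn, until the level-1 pte can
 * map the host page p.
 */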

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			pte = table[index];
			if (is_present_pte(pte) && is_writeble_pte(pte))
				return 0;
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			rmap_add(vcpu, &table[index]);
			return 0;
		}

		if (table[index] == 0) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			table[index] = new_table->page_hpa | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(VALID_PAGE(root));
		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(VALID_PAGE(root));
		root &= PT64_BASE_ADDR_MASK;
		page = page_header(root);
		--page->root_count;
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		root = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, NULL)->page_hpa;
		page = page_header(root);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		root = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					NULL)->page_hpa;
		page = page_header(root);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
			       u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;

	mmu_topup_memory_caches(vcpu);

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));


	paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr))
		return 1;

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++kvm_stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
	mmu_alloc_roots(vcpu);
	kvm_mmu_flush_tlb(vcpu);
	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
	page_header(__pa(shadow_pte))->global = 0;
}

static inline void set_pte_common(struct kvm_vcpu *vcpu,
			     u64 *shadow_pte,
			     gpa_t gaddr,
			     int dirty,
			     u64 access_bits,
			     gfn_t gfn)
{
	hpa_t paddr;

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	*shadow_pte |= access_bits;

	if (!(*shadow_pte & PT_GLOBAL_MASK))
		mark_pagetable_nonglobal(shadow_pte);

	if (is_error_hpa(paddr)) {
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
		return;
	}

	*shadow_pte |= paddr;

	if (access_bits & PT_WRITABLE_MASK) {
		struct kvm_mmu_page *shadow;

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			*shadow_pte &= ~PT_WRITABLE_MASK;
		}
	}

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	rmap_add(vcpu, shadow_pte);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static inline int fix_read_pf(u64 *shadow_ent)
{
	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
	    !(*shadow_ent & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_ent |= PT_USER_MASK;
		*shadow_ent &= ~PT_WRITABLE_MASK;

		return 1;

	}
	return 0;
}

static int may_access(u64 pte, int write, int user)
{

	if (user && !(pte & PT_USER_MASK))
		return 0;
	if (write && !(pte & PT_WRITABLE_MASK))
		return 0;
	return 1;
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	int r;

	destroy_kvm_mmu(vcpu);
	r = init_kvm_mmu(vcpu);
	if (r < 0)
		goto out;
	mmu_topup_memory_caches(vcpu);
out:
	return r;
}

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct kvm_mmu_page *child;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	u64 pte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	int level;
	int flooded = 0;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	if (gfn == vcpu->last_pt_write_gfn) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
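		/*
		 * Added comment: the xor of the first and last byte
		 * offsets is nonzero above the low pte_size bits iff the
		 * write straddles a pte boundary, i.e. it cannot be a
		 * single naturally aligned pte update.
		 */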
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu, page);
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;          /* 32->64 */
			page_offset &= ~PAGE_MASK;
		}
		spte = __va(page->page_hpa);
		spte += page_offset / sizeof(*spte);
		pte = *spte;
		if (is_present_pte(pte)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				rmap_remove(vcpu, spte);
			else {
				child = page_header(pte & PT64_BASE_ADDR_MASK);
				mmu_page_remove_parent_pte(vcpu, child, spte);
			}
		}
		*spte = 0;
	}
}

void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
}

void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu, page);
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
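
/*
 * Added note: kvm_mmu_alloc_page() places new shadow pages at the head
 * of active_mmu_pages, so reclaiming from the tail (prev) above frees
 * the oldest pages first, giving a rough LRU order.
 */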

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu, page);
	}
	while (!list_empty(&vcpu->free_pages)) {
		page = list_entry(vcpu->free_pages.next,
				  struct kvm_mmu_page, link);
		list_del(&page->link);
		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
		page->page_hpa = INVALID_PAGE;
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

		INIT_LIST_HEAD(&page_header->link);
		if ((page = alloc_page(GFP_KERNEL)) == NULL)
			goto error_1;
		page->private = (unsigned long)page_header;
		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
		list_add(&page_header->link, &vcpu->free_pages);
		++vcpu->kvm->n_free_mmu_pages;
	}

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(list_empty(&vcpu->free_pages));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(!list_empty(&vcpu->free_pages));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = __va(page->page_hpa);
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK) {
				rmap_remove(vcpu, &pt[i]);
				pt[i] &= ~PT_WRITABLE_MASK;
			}
	}
}