/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * When set to true, this variable enables two-dimensional paging, in
 * which the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;
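/*
 * tdp_enabled is flipped via kvm_enable_tdp() (defined later in this
 * file); hardware-specific code is expected to call it when nested
 * paging (e.g. EPT or NPT) is available.  Nothing here sets it directly.
 */
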
#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

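/*
 * Pre-filled per-vcpu object caches: the caches below are topped up
 * with GFP_KERNEL allocations before mmu_lock is taken, so that later
 * allocations made under the spinlock can be served without sleeping.
 */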
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		return 1;

	return 0;
}

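/*
 * Summary of the checks below: a gfn may be mapped with a 2MB spte
 * only if no page in the range is already shadowed/write-protected,
 * the host backs the range with a hugepage, and the slot is not being
 * dirty-logged.
 */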
static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, (then rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	account_shadowed(kvm, gfn);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (*pos != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

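/*
 * role.quadrant disambiguates shadow pages when guest and shadow table
 * sizes differ: a 32-bit guest table holds 1024 entries but a shadow
 * table only 512, so each guest table is split across multiple shadow
 * pages (two at level 1, four at level 2), one per quadrant.
 */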
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

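/*
 * Tear down a shadow page: detach every parent pte, unlink all child
 * mappings, then either free the page or, if it is still referenced as
 * an active root, mark it invalid and make other vcpus reload their
 * mmu state.
 */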
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		if (!sp->role.metaphysical)
			unaccount_shadowed(kvm, sp->gfn);
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	} else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

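/*
 * Build and install a shadow pte for gfn/page: applies the accessed/
 * dirty tracking policy, write-protects shadowed guest page tables,
 * and keeps the rmap and large-page statistics in sync for both 4kB
 * and 2MB mappings.
 */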
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 struct page *page)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (host_pfn != page_to_pfn(page)) {
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow ||
		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __func__, spte);
	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
	set_shadow_pte(shadow_pte, spte);
	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
	    && (spte & PT_PRESENT_MASK))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			   int largepage, gfn_t gfn, struct page *page,
			   int level)
{
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 0, gfn, page);
			return pt_write;
		}

		if (largepage && level == 2) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				    0, write, 1, &pt_write, 1, gfn, page);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;

	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, page,
			 PT32E_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}


static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	struct page *page;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&current->mm->mmap_sem);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, page, TDP_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);

	return r;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

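/*
 * paging_tmpl.h is included twice; each inclusion expands the guest
 * page-table walker and fault-handling code for one table format: the
 * paging64_* variants (PTTYPE 64, used for PAE and long mode) and the
 * paging32_* variants (PTTYPE 32, legacy 2-level paging).
 */
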
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->shadow_root_level = TDP_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
	    && !vcpu->arch.update_pte.largepage) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

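/*
 * Only pte changes that could leave a stale translation usable by
 * another vcpu (a present pte moved to a different frame or stripped
 * of permissions) require the expensive remote TLB flush; everything
 * else gets by with a local flush.
 */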
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}

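/*
 * Called on emulated writes to guest page tables: the affected sptes
 * are zapped or updated in place, and a write-flood heuristic decides
 * when a page has stopped being used as a page table and should simply
 * be unshadowed.
 */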
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

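/*
 * Presumably called once from hardware-specific setup (e.g. by the SVM
 * module when NPT is available) before vcpus are initialized, so that
 * init_kvm_mmu() selects the tdp context.
 */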
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

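/*
 * pae_root backs the four page-directory-pointer entries used when the
 * guest runs in 32-bit or PAE mode; see the DMA32 note below for why it
 * must sit under 4GB.
 */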
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

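/*
 * Strip the writable bit from every shadow pte belonging to @slot so
 * that subsequent guest writes fault and can be tracked, e.g. for dirty
 * logging during live migration.
 */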
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}
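
/*
 * Tear down every shadow page in the VM.  list_for_each_entry_safe is
 * required because kvm_mmu_zap_page unlinks each sp from
 * active_mmu_pages while we walk the list.
 */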
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
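/*
 * Worked example (assuming the usual KVM_PERMILLE_MMU_PAGES of 20 and
 * KVM_MIN_ALLOC_MMU_PAGES of 64): a 1GB guest spans 262144 4K pages, so
 * it is granted 262144 * 20 / 1000 = 5242 shadow pages; an 8MB guest
 * would compute only 40 and is rounded up to the 64-page floor.
 */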
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

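/*
 * Paravirtualized MMU batching: the guest packs a sequence of
 * kvm_mmu_op_* records into a buffer and submits it with a single
 * hypercall; the helpers below cursor through that buffer.
 */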
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

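/*
 * Guest ptes are 8 bytes wide in long mode and under PAE, but only 4
 * bytes wide under plain 32-bit paging, so size the emulated write to
 * match.
 */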
static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			     gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->tlb_flush(vcpu);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

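/*
 * Returns 1 when one op has been decoded and executed, 0 when the buffer
 * is exhausted or holds a truncated or unknown op, and a negative errno
 * if a handler fails.
 */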
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

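/*
 * Host-side handler for the guest's batched-MMU hypercall.  Guest code
 * invoking it might look roughly like this (a sketch only; the
 * kvm_hypercall3 ABI and the address split are assumptions, not defined
 * in this file):
 *
 *	struct kvm_mmu_op_write_pte wpte = {
 *		.header.op = KVM_MMU_OP_WRITE_PTE,
 *		.pte_phys  = pte_phys,
 *		.pte_val   = pte_val,
 *	};
 *	kvm_hypercall3(KVM_HC_MMU_OP, sizeof(wpte),
 *		       lower_32_bits(__pa(&wpte)),
 *		       upper_32_bits(__pa(&wpte)));
 */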
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer buffer;

	down_read(&vcpu->kvm->slots_lock);
	down_read(&current->mm->mmap_sem);

	buffer.ptr = buffer.buf;
	buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
	buffer.processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
	if (r)
		goto out;

	while (buffer.len) {
		r = kvm_pv_mmu_op_one(vcpu, &buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer.processed;
	up_read(&current->mm->mmap_sem);
	up_read(&vcpu->kvm->slots_lock);
	return r;
}

#ifdef AUDIT

static const char *audit_msg;

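/* Sign-extend bit 47 so the result is a canonical 48-bit x86-64 address. */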
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);

		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __func__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __func__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif