/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports it, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static struct page *spte_to_page(u64 pte)
{
	hfn_t hfn = (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	return pfn_to_page(hfn);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

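/*
 * Pre-fill the per-vcpu object caches.  The MMU fault paths that
 * consume these objects run under mmu_lock, where sleeping GFP_KERNEL
 * allocations are not possible, so everything is allocated up front.
 */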
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		return 1;

	return 0;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
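/*
 * Illustrative example: sptes are 8-byte aligned, so bit zero of an
 * spte pointer is always clear.  While a gfn has a single mapping,
 * rmap_add() below stores the spte pointer directly:
 *     *rmapp == (unsigned long)spte
 * Once a second spte maps the gfn, both pointers move into a
 * kvm_rmap_desc and the descriptor address is tagged:
 *     *rmapp == (unsigned long)desc | 1
 */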
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = spte_to_page(*spte);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

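/*
 * Remove write access from every spte mapping @gfn, in both the 4kB
 * rmap chain and the huge-page chain.  Writable huge-page sptes are
 * zapped outright; remote TLBs are flushed if anything was writable.
 */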
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		struct page *page;

		spte = rmap_next(kvm, rmapp, NULL);
		page = spte_to_page(*spte);
		SetPageDirty(page);
	}

	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	account_shadowed(kvm, gfn);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (*pos != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

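/*
 * Find the shadow page for (gfn, role) in the hash table, attaching
 * parent_pte on a hit.  On a miss a fresh shadow page is allocated and,
 * unless it is metaphysical, the guest page it shadows is
 * write-protected so future guest writes can be trapped.
 */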
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

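/*
 * Tear down a shadow page: detach it from all parent ptes, unlink its
 * children, then either free it or, while it is still in use as a
 * root, mark it invalid and ask the vcpus to reload their mmu state.
 */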
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		if (!sp->role.metaphysical)
			unaccount_shadowed(kvm, sp->gfn);
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

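/*
 * Zap every shadow page that shadows @gfn as a guest page table, so
 * the guest can reuse the frame as ordinary memory.  Returns nonzero
 * if at least one page was zapped.
 */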
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

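/*
 * Central spte writer: merge guest and shadow permissions, handle a
 * huge-page overwrite of an existing pte hierarchy, write-protect the
 * gfn if it is itself shadowed as a page table, and keep the rmap and
 * page reference accounting consistent.
 */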
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 struct page *page, bool speculative)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (page != spte_to_page(*shadow_pte)) {
			pgprintk("hfn old %lx new %lx\n",
				 page_to_pfn(spte_to_page(*shadow_pte)),
				 page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!speculative)
		pte_access |= PT_ACCESSED_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow ||
		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __func__, spte);
	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
	set_shadow_pte(shadow_pte, spte);
	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
	    && (spte & PT_PRESENT_MASK))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

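/*
 * Walk the shadow page table for a direct mapping (guest physical ==
 * translated address), allocating intermediate shadow pages as needed
 * and installing the final pte at level 1, or at level 2 for a huge
 * page.
 */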
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			   int largepage, gfn_t gfn, struct page *page,
			   int level)
{
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 0, gfn, page, false);
			return pt_write;
		}

		if (largepage && level == 2) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 1, gfn, page, false);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

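/*
 * Fault handler used while guest paging is disabled: resolve gfn to a
 * host page, collapsing to a huge page when the host backs the range
 * with one, and install the mapping under mmu_lock.
 */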
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;

	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, page,
			 PT32E_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}


static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	struct page *page;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&current->mm->mmap_sem);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, page, TDP_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);

	return r;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->shadow_root_level = TDP_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
	    && !vcpu->arch.update_pte.largepage) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

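/*
 * Decide whether other vcpus' TLBs may hold a stale translation: only
 * if the old spte was present, and either the new one is gone, points
 * at a different frame, or has lost permission bits.
 */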
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

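/*
 * Peek at the guest pte being written (re-reading all 8 bytes when a
 * 32-bit guest updates half of a PAE gpte) and pre-pin the page it
 * points to, so kvm_mmu_pte_write() can update sptes without faulting.
 */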
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	down_read(&current->mm->mmap_sem);
	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}

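/*
 * Called for emulated writes to guest pages that are shadowed as page
 * tables.  Updates or zaps the affected sptes, and tracks repeated
 * rewrites: a flood of writes to the same gfn suggests the page is no
 * longer used as a page table, so it is simply unshadowed.
 */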
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

1880 1881
	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
1882 1883
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
1884
	}
1885
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
A
Avi Kivity 已提交
1886 1887 1888 1889
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

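/*
 * MMU initialization is split in two: kvm_mmu_create() allocates the
 * backing pages at vcpu-creation time, and kvm_mmu_setup() later picks
 * the paging mode (see init_kvm_mmu()) once the vcpu is ready to run.
 */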
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

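/*
 * Strip the writable bit from every spte belonging to memory slot
 * @slot; used when dirty logging is enabled so that guest writes fault
 * and can be tracked.
 */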
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

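/*
 * Tear down every shadow page in the VM (e.g. when the guest memory
 * layout changes), flushing remote TLBs once at the end rather than
 * per page.
 */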
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

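/*
 * Module-wide slab caches for pte chains, rmap descriptors and shadow
 * page headers; they back the per-vcpu memory caches filled by
 * mmu_topup_memory_caches().
 */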
int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for this kvm instance.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
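/*
 * Example with hypothetical numbers: a guest with 1GB of memory spans
 * 262144 4K pages; assuming KVM_PERMILLE_MMU_PAGES is 20 (i.e. 2%),
 * that allows 262144 * 20 / 1000 = 5242 shadow pages, comfortably
 * above the KVM_MIN_ALLOC_MMU_PAGES floor.
 */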

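/*
 * Paravirtual MMU support: a cooperating guest batches page-table
 * updates into a buffer and submits them with a single hypercall,
 * saving an exit per update.  The helpers below consume that buffer.
 */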
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

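/*
 * Apply one guest pte write.  Going through __emulator_write_phys()
 * ensures the shadow MMU observes the update just like an ordinary
 * emulated write.
 */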
static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			     gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->tlb_flush(vcpu);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

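/*
 * Decode and execute a single op from the buffer.  Returns 1 to keep
 * processing, 0 when the buffer is exhausted or malformed, negative on
 * error.
 */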
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

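/*
 * Entry point for the mmu-op hypercall: copy in up to one buffer's
 * worth of packed ops, apply them in order, and report the number of
 * bytes consumed back through @ret.
 */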
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer buffer;

	down_read(&vcpu->kvm->slots_lock);
	down_read(&current->mm->mmap_sem);

	buffer.ptr = buffer.buf;
	buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
	buffer.processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
	if (r)
		goto out;

	while (buffer.len) {
		r = kvm_pv_mmu_op_one(vcpu, &buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer.processed;
	up_read(&current->mm->mmap_sem);
	up_read(&vcpu->kvm->slots_lock);
	return r;
}

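/*
 * Audit support: when built with AUDIT defined, cross-check the shadow
 * page tables and rmaps against the guest page tables after each
 * interesting mmu operation and print any inconsistency found.
 */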
#ifdef AUDIT

static const char *audit_msg;

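/* Sign-extend bit 47 so the address is canonical, as on real hardware. */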
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);

		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

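/* The rmap entry count must match the number of writable sptes. */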
static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __func__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __func__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

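/* Run all audit passes with debug printing temporarily disabled. */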
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif