/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When this variable is set to true, Two-Dimensional Paging is enabled:
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
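/*
 * Worked example (follows directly from the definitions above): with
 * PAGE_SHIFT == 12 and PT64_LEVEL_BITS == 9, PT64_LEVEL_SHIFT(1) == 12,
 * PT64_LEVEL_SHIFT(2) == 21, PT64_LEVEL_SHIFT(3) == 30 and
 * PT64_LEVEL_SHIFT(4) == 39, so PT64_INDEX(addr, 2) == (addr >> 21) & 511,
 * i.e. the 9-bit second-level index of a 4-level walk.
 */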


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
	u64 *sptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))
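/*
 * Typical use of the iterator (sketch only; mirrors __direct_map() below):
 *
 *	for_each_shadow_entry(vcpu, addr, iterator) {
 *		if (iterator.level == level)
 *			break;	// iterator.sptep now points at the leaf spte
 *		// otherwise descend, allocating intermediate pages as needed
 *	}
 */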


struct kvm_unsync_walk {
	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};

typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
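/*
 * Example: rsvd_bits(3, 5) builds 0x38 -- a run of (5 - 3 + 1) one bits
 * starting at bit 3.
 */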

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
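/*
 * Worked example (assuming the conventional values PT32_DIR_PSE36_SHIFT == 13
 * and PAGE_SHIFT == 12): shift == 7, so bit 13 of the gpte becomes bit 20 of
 * the gfn delta -- which is physical address bit 32 (20 + PAGE_SHIFT), as
 * PSE-36 specifies.
 */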

static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn,
			       struct kvm_memory_slot *slot,
			       int level)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
	return &slot->lpage_info[level - 2][idx].write_count;
}
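/*
 * Worked example: at level == PT_DIRECTORY_LEVEL on x86,
 * KVM_PAGES_PER_HPAGE(level) == 512 (2MB / 4KB), so a gfn of 0x601 in a
 * slot with base_gfn 0x400 yields idx == (0x601 / 512) - (0x400 / 512)
 * == 3 - 2 == 1.
 */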

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int *write_count;
	int i;

	gfn = unalias_gfn(kvm, gfn);

	slot = gfn_to_memslot_unaliased(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		write_count   = slot_largepage_idx(gfn, slot, i);
		*write_count += 1;
	}
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int *write_count;
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		slot          = gfn_to_memslot_unaliased(kvm, gfn);
		write_count   = slot_largepage_idx(gfn, slot, i);
		*write_count -= 1;
		WARN_ON(*write_count < 0);
	}
}

static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
{
	struct kvm_memory_slot *slot;
	int *largepage_idx;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot, level);
		return *largepage_idx;
	}

	return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;
	int host_level, level, max_level;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return PT_PAGE_TABLE_LEVEL;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;

	return level - 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
		(slot->base_gfn / KVM_PAGES_PER_HPAGE(level));

	return &slot->lpage_info[level - 2][idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 *
 */
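/*
 * Illustrative sketch only (not part of the implementation): decoding an
 * rmapp word under the bit-zero tagging scheme described above.  The real
 * traversal logic lives in rmap_next() below.
 *
 *	if (!*rmapp)
 *		; // no sptes map this gfn
 *	else if (!(*rmapp & 1))
 *		spte = (u64 *)*rmapp;			// single spte, inline
 *	else
 *		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); // desc chain
 */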
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i, count = 0;

	if (!is_rmap_spte(*spte))
		return count;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->sptes[0] = (u64 *)*rmapp;
		desc->sptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->sptes[RMAP_EXT-1] && desc->more) {
			desc = desc->more;
			count += RMAP_EXT;
		}
		if (desc->sptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			;
		desc->sptes[i] = spte;
	}
	return count;
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_spte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writable_pte(*spte))
		kvm_set_pfn_dirty(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
				if (desc->sptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->sptes[i];
			prev_spte = desc->sptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int i, write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writable_pte(*spte)) {
			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
		while (spte) {
			BUG_ON(!spte);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writable_pte(*spte)) {
				rmap_remove(kvm, spte);
				--kvm->stat.lpages;
				__set_spte(spte, shadow_trap_nonpresent_pte);
				spte = NULL;
				write_protected = 1;
			}
			spte = rmap_next(kvm, rmapp, spte);
		}
	}

	return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long data)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		__set_spte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
			     unsigned long data)
{
	int need_flush = 0;
	u64 *spte, new_spte;
	pte_t *ptep = (pte_t *)data;
	pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!is_shadow_present_pte(*spte));
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
		need_flush = 1;
		if (pte_write(*ptep)) {
			rmap_remove(kvm, spte);
			__set_spte(spte, shadow_trap_nonpresent_pte);
			spte = rmap_next(kvm, rmapp, NULL);
		} else {
			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
			if (is_writable_pte(*spte))
				kvm_set_pfn_dirty(spte_to_pfn(*spte));
			__set_spte(spte, new_spte);
			spte = rmap_next(kvm, rmapp, spte);
		}
	}
	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long data))
{
	int i, j;
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;

	slots = rcu_dereference(kvm->memslots);

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

			ret = handler(kvm, &memslot->rmap[gfn_offset], data);

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
				int idx = gfn_offset;
				idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
				ret |= handler(kvm,
					&memslot->lpage_info[j][idx].rmap_pde,
					data);
			}
			trace_kvm_age_page(hva, memslot, ret);
			retval |= ret;
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long data)
{
	u64 *spte;
	int young = 0;

	/*
	 * Emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!shadow_accessed_mask)
		return kvm_unmap_rmapp(kvm, rmapp, data);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	gfn = unalias_gfn(vcpu->kvm, gfn);
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
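/*
 * Example: the hash is just the low bits of the gfn, so with
 * KVM_MMU_HASH_SHIFT == 10 (1024 buckets, assumed here) gfn 0x12345
 * lands in bucket 0x345.
 */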

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&sp->oos_link);
	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}


static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    mmu_parent_walk_fn fn)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
		fn(vcpu, parent_sp);
		mmu_parent_walk(vcpu, parent_sp, fn);
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
			fn(vcpu, parent_sp);
			mmu_parent_walk(vcpu, parent_sp, fn);
		}
}

static void kvm_mmu_update_unsync_bitmap(u64 *spte)
{
	unsigned int index;
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	index = spte - sp->spt;
	if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
		sp->unsync_children++;
	WARN_ON(!sp->unsync_children);
}

static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->parent_pte)
		return;

	if (!sp->multimapped) {
		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
		return;
	}

	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
		}
}

static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	kvm_mmu_update_parents_unsync(sp);
	return 1;
}

static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
					struct kvm_mmu_page *sp)
{
	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
	kvm_mmu_update_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
			struct kvm_mmu_page *child;
			child = page_header(ent & PT64_BASE_ADDR_MASK);

			if (child->unsync_children) {
				if (mmu_pages_add(pvec, child, i))
					return -ENOSPC;

				ret = __mmu_unsync_walk(child, pvec);
				if (!ret)
					__clear_bit(i, sp->unsync_child_bitmap);
				else if (ret > 0)
					nr_unsync_leaf += ret;
				else
					return ret;
			}

			if (child->unsync) {
				nr_unsync_leaf++;
				if (mmu_pages_add(pvec, child, i))
					return -ENOSPC;
			}
		}
	}

	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
		sp->unsync_children = 0;

	return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.direct
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	if (sp->role.glevels != vcpu->arch.mmu.root_level) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	trace_kvm_mmu_sync_page(sp);
	if (rmap_write_protect(vcpu->kvm, sp->gfn))
		kvm_flush_remote_tlbs(vcpu->kvm);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_next(&pvec, &parents, -1),	\
			sp = pvec.page[i].sp;			\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;
			return n;
		}

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;
	}

	return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];
		if (!sp)
			return;

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
		level++;
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
{
	parents->parent[parent->role.level-1] = NULL;
	pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		int protected = 0;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

1306 1307 1308 1309
		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp);
			mmu_pages_clear_parents(&parents);
		}
1310
		cond_resched_lock(&vcpu->kvm->mmu_lock);
1311 1312
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
1313 1314
}

1315 1316 1317 1318
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
1319
					     int direct,
1320
					     unsigned access,
1321
					     u64 *parent_pte)
1322 1323 1324 1325 1326
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
1327
	struct kvm_mmu_page *sp;
1328
	struct hlist_node *node, *tmp;
1329

1330
	role = vcpu->arch.mmu.base_role;
1331
	role.level = level;
1332
	role.direct = direct;
1333
	role.access = access;
1334
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1335 1336 1337 1338
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
1339
	index = kvm_page_table_hashfn(gfn);
1340
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1341 1342 1343 1344 1345 1346 1347 1348 1349
	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
		if (sp->gfn == gfn) {
			if (sp->unsync)
				if (kvm_sync_page(vcpu, sp))
					continue;

			if (sp->role.word != role.word)
				continue;

1350
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1351 1352 1353 1354
			if (sp->unsync_children) {
				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
				kvm_mmu_mark_parents_unsync(vcpu, sp);
			}
A
			trace_kvm_mmu_get_page(sp, false);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	if (!sp)
		return sp;
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
1365
	if (!direct) {
1366 1367
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
1368 1369
		account_shadowed(vcpu->kvm, gfn);
	}
1370 1371 1372 1373
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
A
	trace_kvm_mmu_get_page(sp, true);
	return sp;

1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;
	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;

	if (iterator->level == PT_PAGE_TABLE_LEVEL)
		if (is_large_pte(*iterator->sptep))
			return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				if (is_large_pte(ent))
					--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
	}
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_zap_page(kvm, sp);
			mmu_pages_clear_parents(&parents);
		}
		zapped += pages.nr;
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int ret;

	trace_kvm_mmu_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	kvm_flush_remote_tlbs(kvm);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	int used_pages;

	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
	used_pages = max(0, used_pages);

	/*
 * If we set the number of mmu pages to be smaller than the number
 * of active pages, we must free some mmu pages before we change
 * the value
	 */

	if (used_pages > kvm_nr_mmu_pages) {
		while (used_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			used_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.direct) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			r = 1;
			if (kvm_mmu_zap_page(kvm, sp))
				n = bucket->first;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *nn;

	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
		if (sp->gfn == gfn && !sp->role.direct
		    && !sp->role.invalid) {
			pgprintk("%s: zap %lx %x\n",
				 __func__, gfn, sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
		}
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn);
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
	}
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return page;
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive end, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *s;
	struct hlist_node *node, *n;

	trace_kvm_mmu_unsync_page(sp);
	index = kvm_page_table_hashfn(sp->gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	/* don't unsync if pagetable is shadowed with multiple roles */
	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
		if (s->gfn != sp->gfn || s->role.direct)
			continue;
		if (s->role.word != sp->role.word)
			return 1;
	}
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(vcpu, sp);

	mmu_convert_notrap(sp);
	return 0;
}

static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *shadow;

	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
	if (shadow) {
		if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;
		if (shadow->unsync)
			return 0;
		if (can_unsync && oos_shadow)
			return kvm_unsync_page(vcpu, shadow);
		return 1;
	}
	return 0;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool reset_host_protection)
{
	u64 spte;
	int ret = 0;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (reset_host_protection)
		spte |= SPTE_HOST_WRITEABLE;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {

		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			spte = shadow_trap_nonpresent_pte;
			goto set_pte;
		}

		spte |= PT_WRITABLE_MASK;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	__set_spte(sptep, spte);
	return ret;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative,
			 bool reset_host_protection)
{
	int was_rmapped = 0;
	int was_writable = is_writable_pte(*sptep);
	int rmap_count;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
M
Marcelo Tosatti 已提交
1880 1881 1882 1883
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
1884 1885
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
1886
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
1887
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
1888 1889

			child = page_header(pte & PT64_BASE_ADDR_MASK);
A
Avi Kivity 已提交
1890 1891
			mmu_page_remove_parent_pte(child, sptep);
		} else if (pfn != spte_to_pfn(*sptep)) {
M
Marcelo Tosatti 已提交
1892
			pgprintk("hfn old %lx new %lx\n",
A
Avi Kivity 已提交
1893 1894
				 spte_to_pfn(*sptep), pfn);
			rmap_remove(vcpu->kvm, sptep);
1895 1896
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
1897
	}
1898

A
Avi Kivity 已提交
1899
	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1900 1901
		      dirty, level, gfn, pfn, speculative, true,
		      reset_host_protection)) {
M
Marcelo Tosatti 已提交
1902 1903
		if (write_fault)
			*ptwrite = 1;
1904 1905
		kvm_x86_ops->tlb_flush(vcpu);
	}
M
Marcelo Tosatti 已提交
1906

A
Avi Kivity 已提交
1907
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
M
Marcelo Tosatti 已提交
1908
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
A
Avi Kivity 已提交
1909
		 is_large_pte(*sptep)? "2MB" : "4kB",
1910 1911
		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
		 *sptep, sptep);
A
Avi Kivity 已提交
1912
	if (!was_rmapped && is_large_pte(*sptep))
M
Marcelo Tosatti 已提交
1913 1914
		++vcpu->kvm->stat.lpages;

A
Avi Kivity 已提交
1915
	page_header_update_slot(vcpu->kvm, sptep, gfn);
1916
	if (!was_rmapped) {
1917
		rmap_count = rmap_add(vcpu, sptep, gfn);
1918
		kvm_release_pfn_clean(pfn);
1919
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1920
			rmap_recycle(vcpu, sptep, gfn);
1921
	} else {
1922
		if (was_writable)
1923
			kvm_release_pfn_dirty(pfn);
1924
		else
1925
			kvm_release_pfn_clean(pfn);
1926
	}
1927
	if (speculative) {
A
Avi Kivity 已提交
1928
		vcpu->arch.last_pte_updated = sptep;
1929 1930
		vcpu->arch.last_pte_gfn = gfn;
	}
1931 1932
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

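/*
 * __direct_map() walks the shadow page table for the faulting gfn and
 * fills in whatever is missing: each absent intermediate level gets a
 * freshly allocated shadow page linked in with a present/writable/user
 * entry, and the leaf level gets the final translation installed via
 * mmu_set_spte().  The pt_write result of mmu_set_spte() is returned
 * to the caller.
 */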
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int level, gfn_t gfn, pfn_t pfn)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int pt_write = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write,
				     level, gfn, pfn, false, true);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);
			if (!sp) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			__set_spte(iterator.sptep,
				   __pa(sp->spt)
				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
				   | shadow_user_mask | shadow_x_mask);
		}
	}
	return pt_write;
}

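/*
 * The pfn lookup below happens outside mmu_lock, so the mmu notifier
 * sequence count is sampled first (with a read barrier) and checked
 * again via mmu_notifier_retry() once the lock is held.  If an
 * invalidation raced with gfn_to_pfn(), the spte is not installed and
 * the fault is, in effect, retried by the guest.
 */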
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int level;
	pfn_t pfn;
	unsigned long mmu_seq;

	level = mapping_level(vcpu, gfn);

	/*
	 * This path builds a PAE pagetable - so we can map 2mb pages at
	 * maximum. Therefore check if the level is larger than that.
	 */
	if (level > PT_DIRECTORY_LEVEL)
		level = PT_DIRECTORY_LEVEL;

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}


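/*
 * mmu_free_roots() drops the reference this vcpu holds on its root
 * shadow pages.  A root whose root_count reaches zero is only zapped
 * here if it was already marked role.invalid; otherwise the page is
 * left in the hash so a later mmu_alloc_roots() can reuse it.
 */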
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		ret = 1;
	}

	return ret;
}

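/*
 * mmu_alloc_roots() (re)builds the root of the shadow page table: a
 * single level-4 root page when the shadow (or tdp) mmu uses 64-bit
 * paging, otherwise the four PAE roots derived from the guest PDPTEs.
 * A root gfn that is not covered by any memslot makes mmu_check_root()
 * request a triple fault and the allocation is abandoned.
 */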
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int direct = 0;
	u64 pdptr;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			direct = 1;
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return 0;
	}
	direct = !is_paging(vcpu);
	if (tdp_enabled)
		direct = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = kvm_pdptr_read(vcpu, i);
			if (!is_present_gpte(pdptr)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	return 0;
}

static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
				  u32 access, u32 *error)
{
	if (error)
		*error = 0;
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

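/*
 * tdp_page_fault() is the two-dimensional-paging counterpart of
 * nonpaging_map(): with TDP the hardware walks the guest page tables
 * itself, so the faulting address handed in here is already a guest
 * physical address and can be mapped directly with __direct_map().
 */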
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	pfn_t pfn;
	int r;
	int level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	level = mapping_level(vcpu, gfn);

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

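/*
 * rsvd_bits_mask[x][level-1] holds the bits that are reserved in a
 * guest pte at the given level; index 0 applies when bit 7 of the
 * gpte is clear and index 1 when it is set (PSE / large page), which
 * is exactly how is_rsvd_bits_set() above indexes the table.
 */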
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!is_nx(vcpu))
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}

2340
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
A
Avi Kivity 已提交
2341
{
2342
	struct kvm_mmu *context = &vcpu->arch.mmu;
A
Avi Kivity 已提交
2343 2344 2345 2346 2347

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
2348
	context->prefetch_page = paging64_prefetch_page;
2349
	context->sync_page = paging64_sync_page;
M
Marcelo Tosatti 已提交
2350
	context->invlpg = paging64_invlpg;
A
Avi Kivity 已提交
2351
	context->free = paging_free;
2352 2353
	context->root_level = level;
	context->shadow_root_level = level;
A
Avi Kivity 已提交
2354
	context->root_hpa = INVALID_PAGE;
A
Avi Kivity 已提交
2355 2356 2357
	return 0;
}

2358 2359
static int paging64_init_context(struct kvm_vcpu *vcpu)
{
2360
	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2361 2362 2363
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

A
Avi Kivity 已提交
2364 2365
static int paging32_init_context(struct kvm_vcpu *vcpu)
{
2366
	struct kvm_mmu *context = &vcpu->arch.mmu;
A
Avi Kivity 已提交
2367

2368
	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
A
Avi Kivity 已提交
2369 2370 2371 2372
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
2373
	context->prefetch_page = paging32_prefetch_page;
2374
	context->sync_page = paging32_sync_page;
M
Marcelo Tosatti 已提交
2375
	context->invlpg = paging32_invlpg;
A
Avi Kivity 已提交
2376 2377
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
A
Avi Kivity 已提交
2378
	context->root_hpa = INVALID_PAGE;
A
Avi Kivity 已提交
2379 2380 2381 2382 2383
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
2384
	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2385
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
A
Avi Kivity 已提交
2386 2387
}

2388 2389 2390 2391 2392 2393 2394 2395
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
2396
	context->sync_page = nonpaging_sync_page;
M
Marcelo Tosatti 已提交
2397
	context->invlpg = nonpaging_invlpg;
2398
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2399 2400 2401 2402 2403 2404
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
2405
		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2406 2407 2408
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
2409
		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2410 2411 2412
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
2413
		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2414 2415 2416 2417 2418 2419 2420 2421
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
2422
{
2423 2424
	int r;

A
Avi Kivity 已提交
2425
	ASSERT(vcpu);
2426
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
A
Avi Kivity 已提交
2427 2428

	if (!is_paging(vcpu))
2429
		r = nonpaging_init_context(vcpu);
A
Avi Kivity 已提交
2430
	else if (is_long_mode(vcpu))
2431
		r = paging64_init_context(vcpu);
A
Avi Kivity 已提交
2432
	else if (is_pae(vcpu))
2433
		r = paging32E_init_context(vcpu);
A
Avi Kivity 已提交
2434
	else
2435 2436 2437 2438 2439
		r = paging32_init_context(vcpu);

	vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;

	return r;
A
Avi Kivity 已提交
2440 2441
}

2442 2443
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
2444 2445
	vcpu->arch.update_pte.pfn = bad_pfn;

2446 2447 2448 2449 2450 2451
	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

A
Avi Kivity 已提交
2452 2453 2454
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
2455 2456 2457
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
A
Avi Kivity 已提交
2458 2459 2460 2461
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
2462 2463 2464 2465
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
2466
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
A
Avi Kivity 已提交
2467 2468

int kvm_mmu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
2469
{
2470 2471
	int r;

2472
	r = mmu_topup_memory_caches(vcpu);
A
Avi Kivity 已提交
2473 2474
	if (r)
		goto out;
2475
	spin_lock(&vcpu->kvm->mmu_lock);
2476
	kvm_mmu_free_some_pages(vcpu);
2477
	r = mmu_alloc_roots(vcpu);
2478
	mmu_sync_roots(vcpu);
2479
	spin_unlock(&vcpu->kvm->mmu_lock);
2480 2481
	if (r)
		goto out;
2482
	/* set_cr3() should ensure TLB has been flushed */
2483
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2484 2485
out:
	return r;
A
Avi Kivity 已提交
2486
}
A
Avi Kivity 已提交
2487 2488 2489 2490 2491 2492
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
A
Avi Kivity 已提交
2493

2494
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2495
				  struct kvm_mmu_page *sp,
2496 2497 2498 2499 2500 2501
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
2502
	if (is_shadow_present_pte(pte)) {
2503
		if (is_last_spte(pte, sp->role.level))
2504
			rmap_remove(vcpu->kvm, spte);
2505 2506
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2507
			mmu_page_remove_parent_pte(child, spte);
2508 2509
		}
	}
A
Avi Kivity 已提交
2510
	__set_spte(spte, shadow_trap_nonpresent_pte);
M
Marcelo Tosatti 已提交
2511 2512
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
2513 2514
}

2515
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2516
				  struct kvm_mmu_page *sp,
2517
				  u64 *spte,
2518
				  const void *new)
2519
{
2520
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2521 2522
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
2523
        }
2524

A
Avi Kivity 已提交
2525
	++vcpu->kvm->stat.mmu_pte_updated;
2526
	if (sp->role.glevels == PT32_ROOT_LEVEL)
2527
		paging32_update_pte(vcpu, sp, spte, new);
2528
	else
2529
		paging64_update_pte(vcpu, sp, spte, new);
2530 2531
}

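/*
 * After a guest pte write has been applied to the shadow table, other
 * vcpus only need to be flushed if a previously present spte changed
 * its frame or lost permissions; otherwise a local TLB flush on this
 * vcpu is enough.
 */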
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

2553 2554
static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
2555
	u64 *spte = vcpu->arch.last_pte_updated;
2556

S
Sheng Yang 已提交
2557
	return !!(spte && (*spte & shadow_accessed_mask));
2558 2559
}

2560 2561 2562 2563 2564 2565
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
2566
	pfn_t pfn;
2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
2591
	if (!is_present_gpte(gpte))
2592 2593
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2594

2595
	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2596
	smp_rmb();
2597
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
2598

2599 2600
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
2601 2602
		return;
	}
2603
	vcpu->arch.update_pte.gfn = gfn;
2604
	vcpu->arch.update_pte.pfn = pfn;
2605 2606
}

2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618
static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

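/*
 * kvm_mmu_pte_write() is called when the guest writes to a gfn that is
 * backed by shadow pages.  Guest-initiated writes feed a simple flood
 * detector: repeated writes to the same gfn without the last spte
 * being accessed suggest the page is no longer used as a page table,
 * and misaligned or partial writes do too, so such pages are zapped
 * instead of having their sptes updated in place.
 */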
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2620 2621
		       const u8 *new, int bytes,
		       bool guest_initiated)
2622
{
2623
	gfn_t gfn = gpa >> PAGE_SHIFT;
2624
	struct kvm_mmu_page *sp;
2625
	struct hlist_node *node, *n;
2626 2627
	struct hlist_head *bucket;
	unsigned index;
2628
	u64 entry, gentry;
2629 2630
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
2631
	unsigned pte_size;
2632
	unsigned page_offset;
2633
	unsigned misaligned;
2634
	unsigned quadrant;
2635
	int level;
2636
	int flooded = 0;
2637
	int npte;
2638
	int r;
2639

2640
	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2641
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2642
	spin_lock(&vcpu->kvm->mmu_lock);
2643
	kvm_mmu_access_page(vcpu, gfn);
2644
	kvm_mmu_free_some_pages(vcpu);
A
Avi Kivity 已提交
2645
	++vcpu->kvm->stat.mmu_pte_write;
2646
	kvm_mmu_audit(vcpu, "pre pte write");
2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657
	if (guest_initiated) {
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
2658
	}
2659
	index = kvm_page_table_hashfn(gfn);
2660
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2661
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2662
		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2663
			continue;
2664
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2665
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2666
		misaligned |= bytes < 4;
2667
		if (misaligned || flooded) {
2668 2669 2670 2671
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
2672 2673 2674 2675 2676
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
2677 2678
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2679
				 gpa, bytes, sp->role.word);
2680 2681
			if (kvm_mmu_zap_page(vcpu->kvm, sp))
				n = bucket->first;
A
Avi Kivity 已提交
2682
			++vcpu->kvm->stat.mmu_flooded;
2683 2684
			continue;
		}
2685
		page_offset = offset;
2686
		level = sp->role.level;
2687
		npte = 1;
2688
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
2689 2690 2691 2692 2693 2694 2695
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
2696
				page_offset &= ~7; /* kill rounding error */
2697 2698 2699
				page_offset <<= 1;
				npte = 2;
			}
2700
			quadrant = page_offset >> PAGE_SHIFT;
2701
			page_offset &= ~PAGE_MASK;
2702
			if (quadrant != sp->role.quadrant)
2703
				continue;
2704
		}
2705
		spte = &sp->spt[page_offset / sizeof(*spte)];
2706 2707 2708 2709 2710 2711 2712 2713 2714
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
2715
		while (npte--) {
2716
			entry = *spte;
2717
			mmu_pte_write_zap_pte(vcpu, sp, spte);
2718 2719
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
2720
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2721
			++spte;
2722 2723
		}
	}
2724
	kvm_mmu_audit(vcpu, "post pte write");
2725
	spin_unlock(&vcpu->kvm->mmu_lock);
2726 2727 2728
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
2729
	}
2730 2731
}

2732 2733
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
2734 2735
	gpa_t gpa;
	int r;
2736

2737 2738 2739
	if (tdp_enabled)
		return 0;

2740
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2741

2742
	spin_lock(&vcpu->kvm->mmu_lock);
2743
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2744
	spin_unlock(&vcpu->kvm->mmu_lock);
2745
	return r;
2746
}
2747
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2748

2749
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
2750
{
2751 2752
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2753
		struct kvm_mmu_page *sp;
A
Avi Kivity 已提交
2754

2755
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2756 2757
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
A
Avi Kivity 已提交
2758
		++vcpu->kvm->stat.mmu_recycled;
A
Avi Kivity 已提交
2759 2760 2761
	}
}

2762 2763 2764 2765 2766
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

2767
	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2768 2769 2770 2771 2772 2773 2774 2775
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

2776 2777 2778 2779
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

A
Avi Kivity 已提交
2780
	er = emulate_instruction(vcpu, cr2, error_code, 0);
2781 2782 2783 2784 2785 2786 2787 2788

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
2789 2790
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2791
		vcpu->run->internal.ndata = 0;
2792
		return 0;
2793 2794 2795 2796 2797 2798 2799 2800
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

M
Marcelo Tosatti 已提交
2801 2802 2803 2804 2805 2806 2807 2808
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

2809 2810 2811 2812 2813 2814
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

2815 2816 2817 2818 2819 2820
void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

A
Avi Kivity 已提交
2821 2822
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
2823
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
A
Avi Kivity 已提交
2824 2825 2826 2827
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
2828
	struct page *page;
A
Avi Kivity 已提交
2829 2830 2831 2832
	int i;

	ASSERT(vcpu);

2833 2834 2835 2836 2837 2838 2839
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
2840 2841
		return -ENOMEM;

2842
	vcpu->arch.mmu.pae_root = page_address(page);
2843
	for (i = 0; i < 4; ++i)
2844
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2845

A
Avi Kivity 已提交
2846 2847 2848
	return 0;
}

2849
int kvm_mmu_create(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
2850 2851
{
	ASSERT(vcpu);
2852
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
A
Avi Kivity 已提交
2853

2854 2855
	return alloc_mmu_pages(vcpu);
}
A
Avi Kivity 已提交
2856

2857 2858 2859
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
2860
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2861

2862
	return init_kvm_mmu(vcpu);
A
Avi Kivity 已提交
2863 2864 2865 2866 2867 2868 2869 2870
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
2871
	mmu_free_memory_caches(vcpu);
A
Avi Kivity 已提交
2872 2873
}

2874
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
A
Avi Kivity 已提交
2875
{
2876
	struct kvm_mmu_page *sp;
A
Avi Kivity 已提交
2877

2878
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
A
Avi Kivity 已提交
2879 2880 2881
		int i;
		u64 *pt;

2882
		if (!test_bit(slot, sp->slot_bitmap))
A
Avi Kivity 已提交
2883 2884
			continue;

2885
		pt = sp->spt;
A
Avi Kivity 已提交
2886 2887
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
2888
			if (pt[i] & PT_WRITABLE_MASK)
A
Avi Kivity 已提交
2889 2890
				pt[i] &= ~PT_WRITABLE_MASK;
	}
2891
	kvm_flush_remote_tlbs(kvm);
A
Avi Kivity 已提交
2892
}
2893

2894
void kvm_mmu_zap_all(struct kvm *kvm)
D
Dor Laor 已提交
2895
{
2896
	struct kvm_mmu_page *sp, *node;
D
Dor Laor 已提交
2897

2898
	spin_lock(&kvm->mmu_lock);
2899
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2900 2901 2902
		if (kvm_mmu_zap_page(kvm, sp))
			node = container_of(kvm->arch.active_mmu_pages.next,
					    struct kvm_mmu_page, link);
2903
	spin_unlock(&kvm->mmu_lock);
D
Dor Laor 已提交
2904

2905
	kvm_flush_remote_tlbs(kvm);
D
Dor Laor 已提交
2906 2907
}

2908
static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	kvm_mmu_zap_page(kvm, page);
}

static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
2926
		int npages, idx;
2927

2928
		idx = srcu_read_lock(&kvm->srcu);
2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
			kvm_mmu_remove_one_alloc_mmu_page(kvm);
			cache_count--;
			kvm_freed = kvm;
		}
		nr_to_scan--;

		spin_unlock(&kvm->mmu_lock);
2941
		srcu_read_unlock(&kvm->srcu, idx);
2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

I
Ingo Molnar 已提交
2956
static void mmu_destroy_caches(void)
2957 2958 2959 2960 2961
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
2962 2963
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
2964 2965
}

2966 2967 2968 2969 2970 2971
void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}

2972 2973 2974 2975
int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
2976
					    0, 0, NULL);
2977 2978 2979 2980
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
2981
					    0, 0, NULL);
2982 2983 2984
	if (!rmap_desc_cache)
		goto nomem;

2985 2986
	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
2987
						  0, 0, NULL);
2988 2989 2990
	if (!mmu_page_header_cache)
		goto nomem;

2991 2992
	register_shrinker(&mmu_shrinker);

2993 2994 2995
	return 0;

nomem:
2996
	mmu_destroy_caches();
2997 2998 2999
	return -ENOMEM;
}

3000 3001 3002 3003 3004 3005 3006 3007
/*
 * Calculate the number of mmu pages needed for this kvm: a
 * KVM_PERMILLE_MMU_PAGES share of the guest memory pages in all
 * memslots, but never less than KVM_MIN_ALLOC_MMU_PAGES.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;
3008
	struct kvm_memslots *slots;
3009

3010 3011 3012
	slots = rcu_dereference(kvm->memslots);
	for (i = 0; i < slots->nmemslots; i++)
		nr_pages += slots->memslots[i].npages;
3013 3014 3015 3016 3017 3018 3019 3020

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			     gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

3056
	if (!emulator_write_phys(vcpu, addr, &value, bytes))
3057 3058 3059 3060 3061 3062 3063
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
3064
	kvm_set_cr3(vcpu, vcpu->arch.cr3);
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
3118
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3119

3120 3121 3122
	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;
3123

3124
	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3125 3126 3127
	if (r)
		goto out;

3128 3129
	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
3130 3131 3132 3133 3134 3135 3136 3137
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
3138
	*ret = buffer->processed;
3139 3140 3141
	return r;
}

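/*
 * kvm_mmu_get_spte_hierarchy() copies, from the root downwards, the
 * sptes encountered while walking @addr into @sptes[4] and returns
 * how many were collected; the walk stops after the first entry that
 * is not shadow-present.  Intended for diagnostic dumps of a mapping.
 */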
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171
#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184

typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
				 u64 *sptep);

static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
3185
			if (!is_last_spte(ent, sp->role.level)) {
3186 3187 3188
				struct kvm_mmu_page *child;
				child = page_header(ent & PT64_BASE_ADDR_MASK);
				__mmu_spte_walk(kvm, child, fn);
3189
			} else
3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219
				fn(kvm, sp, &sp->spt[i]);
		}
	}
}

static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		__mmu_spte_walk(vcpu->kvm, sp, fn);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu->kvm, sp, fn);
		}
	}
	return;
}

3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

3230
		if (ent == shadow_trap_nonpresent_pte)
3231 3232 3233
			continue;

		va = canonicalize(va);
3234 3235 3236
		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
3237
			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
J
Jan Kiszka 已提交
3238 3239 3240
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3241

3242 3243 3244 3245 3246
			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

3247
			if (is_shadow_present_pte(ent)
3248
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
3249 3250
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3251
				       audit_msg, vcpu->arch.mmu.root_level,
M
Mike Day 已提交
3252 3253
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
3254 3255 3256 3257
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
3258
			kvm_release_pfn_clean(pfn);
3259

3260 3261 3262 3263 3264 3265
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
3266
	unsigned i;
3267

3268 3269
	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3270 3271
	else
		for (i = 0; i < 4; ++i)
3272
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3273
				audit_mappings_page(vcpu,
3274
						    vcpu->arch.mmu.pae_root[i],
3275 3276 3277 3278 3279 3280 3281
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
3282
	int i, j, k, idx;
3283

3284 3285
	idx = srcu_read_lock(&kvm->srcu);
	slots = rcu_dereference(kvm->memslots);
3286
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3287
		struct kvm_memory_slot *m = &slots->memslots[i];
3288 3289 3290
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
3291
			unsigned long *rmapp = &m->rmap[j];
3292

3293
			if (!*rmapp)
3294
				continue;
3295
			if (!(*rmapp & 1)) {
3296 3297 3298
				++nmaps;
				continue;
			}
3299
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3300 3301
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
A
Avi Kivity 已提交
3302
					if (d->sptes[k])
3303 3304 3305 3306 3307 3308 3309
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
3310
	srcu_read_unlock(&kvm->srcu, idx);
3311 3312 3313
	return nmaps;
}

3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335
void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	if (*sptep & PT_WRITABLE_MASK) {
		rev_sp = page_header(__pa(sptep));
		gfn = rev_sp->gfns[sptep - rev_sp->spt];

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %ld\n",
					 audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
					audit_msg, sptep - rev_sp->spt,
					rev_sp->gfn);
			dump_stack();
			return;
		}

3336 3337
		rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
				    is_large_pte(*sptep));
3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
					 audit_msg, *sptep);
			dump_stack();
		}
	}

}

void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3355
{
3356
	struct kvm_mmu_page *sp;
3357 3358
	int i;

3359
	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3360
		u64 *pt = sp->spt;
3361

3362
		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3363 3364 3365 3366 3367 3368 3369 3370 3371
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
3372
			inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3373 3374
		}
	}
3375
	return;
3376 3377 3378 3379
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
3380 3381
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
3382 3383 3384 3385
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
3386
	struct kvm_mmu_page *sp;
3387 3388
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
3389
	u64 *spte;
3390
	gfn_t gfn;
3391

3392
	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3393
		if (sp->role.direct)
3394
			continue;
3395 3396
		if (sp->unsync)
			continue;
3397

3398
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3399
		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3400
		rmapp = &slot->rmap[gfn - slot->base_gfn];
3401 3402 3403 3404 3405 3406

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (*spte & PT_WRITABLE_MASK)
				printk(KERN_ERR "%s: (%s) shadow page has "
				"writable mappings: gfn %lx role %x\n",
3407
			       __func__, audit_msg, sp->gfn,
3408
			       sp->role.word);
3409 3410
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
3422 3423
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
3424
	audit_writable_sptes_have_rmaps(vcpu);
3425 3426 3427 3428
	dbg = olddbg;
}

#endif