mmu.c 86.0 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7 8 9
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
A
Avi Kivity 已提交
10
 * Copyright 2010 Red Hat, Inc. and/or its affilates.
A
Avi Kivity 已提交
11 12 13 14 15 16 17 18 19
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
A
Avi Kivity 已提交
20

21
#include "mmu.h"
22
#include "x86.h"
A
Avi Kivity 已提交
23
#include "kvm_cache_regs.h"
A
Avi Kivity 已提交
24

25
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
26 27 28 29 30
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
31
#include <linux/swap.h>
M
Marcelo Tosatti 已提交
32
#include <linux/hugetlb.h>
33
#include <linux/compiler.h>
34
#include <linux/srcu.h>
35
#include <linux/slab.h>
36
#include <linux/uaccess.h>
A
Avi Kivity 已提交
37

A
Avi Kivity 已提交
38 39
#include <asm/page.h>
#include <asm/cmpxchg.h>
40
#include <asm/io.h>
41
#include <asm/vmx.h>
A
Avi Kivity 已提交
42

43 44 45 46 47 48 49
/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
50
bool tdp_enabled = false;
51

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
75 76
static int dbg = 0;
module_param(dbg, bool, 0644);
77
#endif
A
Avi Kivity 已提交
78

79 80 81
static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

82 83 84
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
A
Avi Kivity 已提交
85 86 87 88 89
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
90
#endif
A
Avi Kivity 已提交
91 92 93 94 95 96 97

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
M
Mike Day 已提交
98
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
A
Avi Kivity 已提交
99 100 101 102 103 104 105 106 107 108 109

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
M
Mike Day 已提交
110
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
A
Avi Kivity 已提交
111 112 113

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
114 115 116
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
117 118 119 120 121

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


122
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
A
Avi Kivity 已提交
123 124
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
125 126 127 128 129 130
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
131 132 133 134

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135 136 137
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
138

139 140
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)
A
Avi Kivity 已提交
141

142 143
#define RMAP_EXT 4

144 145 146 147 148
#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

149 150
#include <trace/events/kvm.h>

151 152 153
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

154 155
#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

156 157
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

158
struct kvm_rmap_desc {
A
Avi Kivity 已提交
159
	u64 *sptes[RMAP_EXT];
160 161 162
	struct kvm_rmap_desc *more;
};

163 164 165 166 167 168 169 170 171 172 173 174 175
struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

176
typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
M
Marcelo Tosatti 已提交
177

178 179
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
180
static struct kmem_cache *mmu_page_header_cache;
181

182 183
static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
S
Sheng Yang 已提交
184 185 186 187 188 189
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
190

191 192 193 194 195
static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

196 197 198 199 200 201 202
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

S
Sheng Yang 已提交
203 204 205 206 207 208 209
void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
210
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
S
Sheng Yang 已提交
211 212 213 214 215 216 217 218 219
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

220
static bool is_write_protection(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
221
{
222
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
A
Avi Kivity 已提交
223 224 225 226 227 228 229
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

230 231
static int is_nx(struct kvm_vcpu *vcpu)
{
232
	return vcpu->arch.efer & EFER_NX;
233 234
}

235 236 237 238 239 240
static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

M
Marcelo Tosatti 已提交
241 242 243 244 245
static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

246
static int is_writable_pte(unsigned long pte)
A
Avi Kivity 已提交
247 248 249 250
{
	return pte & PT_WRITABLE_MASK;
}

251
static int is_dirty_gpte(unsigned long pte)
252
{
A
Avi Kivity 已提交
253
	return pte & PT_DIRTY_MASK;
254 255
}

256
static int is_rmap_spte(u64 pte)
257
{
258
	return is_shadow_present_pte(pte);
259 260
}

261 262 263 264
static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
265
	if (is_large_pte(pte))
266 267 268 269
		return 1;
	return 0;
}

270
static pfn_t spte_to_pfn(u64 pte)
271
{
272
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
273 274
}

275 276 277 278 279 280 281
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

A
Avi Kivity 已提交
282
static void __set_spte(u64 *sptep, u64 spte)
283
{
284
	set_64bit(sptep, spte);
285 286
}

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
	return xchg(sptep, new_spte);
#else
	u64 old_spte;

	do {
		old_spte = *sptep;
	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

	return old_spte;
#endif
}

302 303 304 305 306 307 308 309
static bool spte_has_volatile_bits(u64 spte)
{
	if (!shadow_accessed_mask)
		return false;

	if (!is_shadow_present_pte(spte))
		return false;

310 311
	if ((spte & shadow_accessed_mask) &&
	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
312 313 314 315 316
		return false;

	return true;
}

317 318 319 320 321
static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

322 323
static void update_spte(u64 *sptep, u64 new_spte)
{
324 325 326
	u64 mask, old_spte = *sptep;

	WARN_ON(!is_rmap_spte(new_spte));
327

328 329 330 331 332 333 334
	new_spte |= old_spte & shadow_dirty_mask;

	mask = shadow_accessed_mask;
	if (is_writable_pte(old_spte))
		mask |= shadow_dirty_mask;

	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
335
		__set_spte(sptep, new_spte);
336
	else
337
		old_spte = __xchg_spte(sptep, new_spte);
338 339 340 341 342 343 344 345

	if (!shadow_accessed_mask)
		return;

	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
346 347
}

348
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
349
				  struct kmem_cache *base_cache, int min)
350 351 352 353
{
	void *obj;

	if (cache->nobjs >= min)
354
		return 0;
355
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
356
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
357
		if (!obj)
358
			return -ENOMEM;
359 360
		cache->objects[cache->nobjs++] = obj;
	}
361
	return 0;
362 363
}

364 365
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
366 367
{
	while (mc->nobjs)
368
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
369 370
}

A
Avi Kivity 已提交
371
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
372
				       int min)
A
Avi Kivity 已提交
373 374 375 376 377 378
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
379
		page = alloc_page(GFP_KERNEL);
A
Avi Kivity 已提交
380 381 382 383 384 385 386 387 388 389
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
390
		free_page((unsigned long)mc->objects[--mc->nobjs]);
A
Avi Kivity 已提交
391 392
}

393
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
394
{
395 396
	int r;

397
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
398
				   pte_chain_cache, 4);
399 400
	if (r)
		goto out;
401
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
402
				   rmap_desc_cache, 4);
403 404
	if (r)
		goto out;
405
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
406 407
	if (r)
		goto out;
408
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
409
				   mmu_page_header_cache, 4);
410 411
out:
	return r;
412 413 414 415
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
416 417
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
418
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
419 420
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
421 422 423 424 425 426 427 428 429 430 431 432 433 434
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
435
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
436 437 438
				      sizeof(struct kvm_pte_chain));
}

439
static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
440
{
441
	kmem_cache_free(pte_chain_cache, pc);
442 443 444 445
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
446
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
447 448 449
				      sizeof(struct kvm_rmap_desc));
}

450
static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
451
{
452
	kmem_cache_free(rmap_desc_cache, rd);
453 454
}

455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}

M
Marcelo Tosatti 已提交
471 472 473 474
/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
475 476 477
static int *slot_largepage_idx(gfn_t gfn,
			       struct kvm_memory_slot *slot,
			       int level)
M
Marcelo Tosatti 已提交
478 479 480
{
	unsigned long idx;

481 482
	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
483
	return &slot->lpage_info[level - 2][idx].write_count;
M
Marcelo Tosatti 已提交
484 485 486 487
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
488
	struct kvm_memory_slot *slot;
M
Marcelo Tosatti 已提交
489
	int *write_count;
490
	int i;
M
Marcelo Tosatti 已提交
491

A
Avi Kivity 已提交
492
	slot = gfn_to_memslot(kvm, gfn);
493 494 495 496 497
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		write_count   = slot_largepage_idx(gfn, slot, i);
		*write_count += 1;
	}
M
Marcelo Tosatti 已提交
498 499 500 501
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
502
	struct kvm_memory_slot *slot;
M
Marcelo Tosatti 已提交
503
	int *write_count;
504
	int i;
M
Marcelo Tosatti 已提交
505

A
Avi Kivity 已提交
506
	slot = gfn_to_memslot(kvm, gfn);
507 508 509 510 511 512
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		write_count   = slot_largepage_idx(gfn, slot, i);
		*write_count -= 1;
		WARN_ON(*write_count < 0);
	}
M
Marcelo Tosatti 已提交
513 514
}

515 516 517
static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
M
Marcelo Tosatti 已提交
518
{
519
	struct kvm_memory_slot *slot;
M
Marcelo Tosatti 已提交
520 521
	int *largepage_idx;

A
Avi Kivity 已提交
522
	slot = gfn_to_memslot(kvm, gfn);
M
Marcelo Tosatti 已提交
523
	if (slot) {
524
		largepage_idx = slot_largepage_idx(gfn, slot, level);
M
Marcelo Tosatti 已提交
525 526 527 528 529 530
		return *largepage_idx;
	}

	return 1;
}

531
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
M
Marcelo Tosatti 已提交
532
{
J
Joerg Roedel 已提交
533
	unsigned long page_size;
534
	int i, ret = 0;
M
Marcelo Tosatti 已提交
535

J
Joerg Roedel 已提交
536
	page_size = kvm_host_page_size(kvm, gfn);
M
Marcelo Tosatti 已提交
537

538 539 540 541 542 543 544 545
	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

546
	return ret;
M
Marcelo Tosatti 已提交
547 548
}

549
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
M
Marcelo Tosatti 已提交
550 551
{
	struct kvm_memory_slot *slot;
552
	int host_level, level, max_level;
M
Marcelo Tosatti 已提交
553 554 555

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
556
		return PT_PAGE_TABLE_LEVEL;
M
Marcelo Tosatti 已提交
557

558 559 560 561 562
	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

563 564 565 566
	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
567 568 569 570
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;

	return level - 1;
M
Marcelo Tosatti 已提交
571 572
}

573 574 575 576
/*
 * Take gfn and return the reverse mapping to it.
 */

577
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
578 579
{
	struct kvm_memory_slot *slot;
M
Marcelo Tosatti 已提交
580
	unsigned long idx;
581 582

	slot = gfn_to_memslot(kvm, gfn);
583
	if (likely(level == PT_PAGE_TABLE_LEVEL))
M
Marcelo Tosatti 已提交
584 585
		return &slot->rmap[gfn - slot->base_gfn];

586 587
	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
M
Marcelo Tosatti 已提交
588

589
	return &slot->lpage_info[level - 2][idx].rmap_pde;
590 591
}

592 593 594
/*
 * Reverse mapping data structures:
 *
595 596
 * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
 * that points to page_address(page).
597
 *
598 599
 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
600 601 602 603
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 *
604
 */
605
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
606
{
607
	struct kvm_mmu_page *sp;
608
	struct kvm_rmap_desc *desc;
609
	unsigned long *rmapp;
610
	int i, count = 0;
611

612
	if (!is_rmap_spte(*spte))
613
		return count;
614
	sp = page_header(__pa(spte));
615
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
616
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
617
	if (!*rmapp) {
618
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
619 620
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
621
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
622
		desc = mmu_alloc_rmap_desc(vcpu);
A
Avi Kivity 已提交
623 624
		desc->sptes[0] = (u64 *)*rmapp;
		desc->sptes[1] = spte;
625
		*rmapp = (unsigned long)desc | 1;
626 627
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
628
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
A
Avi Kivity 已提交
629
		while (desc->sptes[RMAP_EXT-1] && desc->more) {
630
			desc = desc->more;
631 632
			count += RMAP_EXT;
		}
A
Avi Kivity 已提交
633
		if (desc->sptes[RMAP_EXT-1]) {
634
			desc->more = mmu_alloc_rmap_desc(vcpu);
635 636
			desc = desc->more;
		}
A
Avi Kivity 已提交
637
		for (i = 0; desc->sptes[i]; ++i)
638
			;
A
Avi Kivity 已提交
639
		desc->sptes[i] = spte;
640
	}
641
	return count;
642 643
}

644
static void rmap_desc_remove_entry(unsigned long *rmapp,
645 646 647 648 649 650
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

A
Avi Kivity 已提交
651
	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
652
		;
A
Avi Kivity 已提交
653 654
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
655 656 657
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
A
Avi Kivity 已提交
658
		*rmapp = (unsigned long)desc->sptes[0];
659 660 661 662
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
663
			*rmapp = (unsigned long)desc->more | 1;
664
	mmu_free_rmap_desc(desc);
665 666
}

667
static void rmap_remove(struct kvm *kvm, u64 *spte)
668 669 670
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
671
	struct kvm_mmu_page *sp;
672
	gfn_t gfn;
673
	unsigned long *rmapp;
674 675
	int i;

676
	sp = page_header(__pa(spte));
677 678
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
679
	if (!*rmapp) {
680
		printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
681
		BUG();
682
	} else if (!(*rmapp & 1)) {
683
		rmap_printk("rmap_remove:  %p 1->0\n", spte);
684
		if ((u64 *)*rmapp != spte) {
685
			printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
686 687
			BUG();
		}
688
		*rmapp = 0;
689
	} else {
690
		rmap_printk("rmap_remove:  %p many->many\n", spte);
691
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
692 693
		prev_desc = NULL;
		while (desc) {
A
Avi Kivity 已提交
694 695
			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
				if (desc->sptes[i] == spte) {
696
					rmap_desc_remove_entry(rmapp,
697
							       desc, i,
698 699 700 701 702 703
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
704
		pr_err("rmap_remove: %p many->many\n", spte);
705 706 707 708
		BUG();
	}
}

709
static void set_spte_track_bits(u64 *sptep, u64 new_spte)
A
Avi Kivity 已提交
710
{
711
	pfn_t pfn;
712 713
	u64 old_spte = *sptep;

714
	if (!spte_has_volatile_bits(old_spte))
715
		__set_spte(sptep, new_spte);
716
	else
717
		old_spte = __xchg_spte(sptep, new_spte);
718

719
	if (!is_rmap_spte(old_spte))
720
		return;
721

722
	pfn = spte_to_pfn(old_spte);
723
	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
724
		kvm_set_pfn_accessed(pfn);
725
	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
726
		kvm_set_pfn_dirty(pfn);
727 728 729 730 731
}

static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	set_spte_track_bits(sptep, new_spte);
A
Avi Kivity 已提交
732 733 734
	rmap_remove(kvm, sptep);
}

735
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
736 737
{
	struct kvm_rmap_desc *desc;
738 739 740 741 742 743 744 745 746 747 748 749 750
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_spte = NULL;
	while (desc) {
A
Avi Kivity 已提交
751
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
752
			if (prev_spte == spte)
A
Avi Kivity 已提交
753 754
				return desc->sptes[i];
			prev_spte = desc->sptes[i];
755 756 757 758 759 760
		}
		desc = desc->more;
	}
	return NULL;
}

761
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
762
{
763
	unsigned long *rmapp;
764
	u64 *spte;
765
	int i, write_protected = 0;
766

767
	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
768

769 770
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
771 772 773
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
774
		if (is_writable_pte(*spte)) {
775
			update_spte(spte, *spte & ~PT_WRITABLE_MASK);
776 777
			write_protected = 1;
		}
778
		spte = rmap_next(kvm, rmapp, spte);
779
	}
780

M
Marcelo Tosatti 已提交
781
	/* check for huge page mappings */
782 783 784 785 786 787 788 789 790
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
		while (spte) {
			BUG_ON(!spte);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
791
			if (is_writable_pte(*spte)) {
A
Avi Kivity 已提交
792 793
				drop_spte(kvm, spte,
					  shadow_trap_nonpresent_pte);
794 795 796 797 798
				--kvm->stat.lpages;
				spte = NULL;
				write_protected = 1;
			}
			spte = rmap_next(kvm, rmapp, spte);
M
Marcelo Tosatti 已提交
799 800 801
		}
	}

802
	return write_protected;
803 804
}

F
Frederik Deweerdt 已提交
805 806
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long data)
807 808 809 810 811 812 813
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
A
Avi Kivity 已提交
814
		drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
815 816 817 818 819
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

F
Frederik Deweerdt 已提交
820 821
static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
			     unsigned long data)
822 823
{
	int need_flush = 0;
824
	u64 *spte, new_spte;
825 826 827 828 829 830 831 832 833 834 835
	pte_t *ptep = (pte_t *)data;
	pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!is_shadow_present_pte(*spte));
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
		need_flush = 1;
		if (pte_write(*ptep)) {
A
Avi Kivity 已提交
836
			drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
837 838 839 840 841 842 843
			spte = rmap_next(kvm, rmapp, NULL);
		} else {
			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
844
			new_spte &= ~shadow_accessed_mask;
845
			set_spte_track_bits(spte, new_spte);
846 847 848 849 850 851 852 853 854
			spte = rmap_next(kvm, rmapp, spte);
		}
	}
	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

F
Frederik Deweerdt 已提交
855 856
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
857
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
F
Frederik Deweerdt 已提交
858
					 unsigned long data))
859
{
860
	int i, j;
861
	int ret;
862
	int retval = 0;
863 864
	struct kvm_memslots *slots;

865
	slots = kvm_memslots(kvm);
866

867 868
	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
869 870 871 872 873 874
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
875

876
			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
877 878

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
879 880 881 882 883 884
				unsigned long idx;
				int sh;

				sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
				idx = ((memslot->base_gfn+gfn_offset) >> sh) -
					(memslot->base_gfn >> sh);
885
				ret |= handler(kvm,
886 887
					&memslot->lpage_info[j][idx].rmap_pde,
					data);
888
			}
889 890
			trace_kvm_age_page(hva, memslot, ret);
			retval |= ret;
891 892 893 894 895 896 897 898
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
899 900 901 902 903
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
F
Frederik Deweerdt 已提交
904
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
905 906
}

F
Frederik Deweerdt 已提交
907 908
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long data)
909 910 911 912
{
	u64 *spte;
	int young = 0;

913 914 915 916 917 918 919
	/*
	 * Emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
920
	if (!shadow_accessed_mask)
921
		return kvm_unmap_rmapp(kvm, rmapp, data);
922

923 924 925 926 927 928 929 930 931 932 933 934 935 936 937
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

938 939
#define RMAP_RECYCLE_THRESHOLD 1000

940
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
941 942
{
	unsigned long *rmapp;
943 944 945
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));
946

947
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
948

949
	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
950 951 952
	kvm_flush_remote_tlbs(vcpu->kvm);
}

953 954
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
955
	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
956 957
}

958
#ifdef MMU_DEBUG
959
static int is_empty_shadow_page(u64 *spt)
A
Avi Kivity 已提交
960
{
961 962 963
	u64 *pos;
	u64 *end;

964
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
965
		if (is_shadow_present_pte(*pos)) {
966
			printk(KERN_ERR "%s: %p %llx\n", __func__,
967
			       pos, *pos);
A
Avi Kivity 已提交
968
			return 0;
969
		}
A
Avi Kivity 已提交
970 971
	return 1;
}
972
#endif
A
Avi Kivity 已提交
973

974
static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
975
{
976
	ASSERT(is_empty_shadow_page(sp->spt));
977
	hlist_del(&sp->hash_link);
978 979
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
980 981
	if (!sp->role.direct)
		__free_page(virt_to_page(sp->gfns));
982
	kmem_cache_free(mmu_page_header_cache, sp);
983
	++kvm->arch.n_free_mmu_pages;
984 985
}

986 987
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
988
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
989 990
}

991
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
992
					       u64 *parent_pte, int direct)
A
Avi Kivity 已提交
993
{
994
	struct kvm_mmu_page *sp;
A
Avi Kivity 已提交
995

996 997
	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
998 999 1000
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
						  PAGE_SIZE);
1001
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1002
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1003
	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
1004 1005
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
1006
	--vcpu->kvm->arch.n_free_mmu_pages;
1007
	return sp;
A
Avi Kivity 已提交
1008 1009
}

1010
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1011
				    struct kvm_mmu_page *sp, u64 *parent_pte)
1012 1013 1014 1015 1016 1017 1018
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
1019 1020
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;
1021 1022

		if (!old) {
1023
			sp->parent_pte = parent_pte;
1024 1025
			return;
		}
1026
		sp->multimapped = 1;
1027
		pte_chain = mmu_alloc_pte_chain(vcpu);
1028 1029
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1030 1031
		pte_chain->parent_ptes[0] = old;
	}
1032
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
1033 1034 1035 1036 1037 1038 1039 1040
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
1041
	pte_chain = mmu_alloc_pte_chain(vcpu);
1042
	BUG_ON(!pte_chain);
1043
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1044 1045 1046
	pte_chain->parent_ptes[0] = parent_pte;
}

1047
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1048 1049 1050 1051 1052 1053
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

1054 1055 1056
	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
1057 1058
		return;
	}
1059
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1060 1061 1062 1063 1064
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
1065 1066
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
1067 1068 1069 1070 1071
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
1072 1073
			if (i == 0) {
				hlist_del(&pte_chain->link);
1074
				mmu_free_pte_chain(pte_chain);
1075 1076 1077
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
1078 1079
				}
			}
1080 1081 1082 1083 1084
			return;
		}
	BUG();
}

1085
static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
M
Marcelo Tosatti 已提交
1086 1087 1088 1089 1090 1091 1092 1093
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
1094
		fn(parent_sp, sp->parent_pte);
M
Marcelo Tosatti 已提交
1095 1096
		return;
	}
1097

M
Marcelo Tosatti 已提交
1098 1099
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1100 1101 1102
			u64 *spte = pte_chain->parent_ptes[i];

			if (!spte)
M
Marcelo Tosatti 已提交
1103
				break;
1104 1105
			parent_sp = page_header(__pa(spte));
			fn(parent_sp, spte);
M
Marcelo Tosatti 已提交
1106 1107 1108
		}
}

1109 1110
static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1111
{
1112
	mmu_parent_walk(sp, mark_unsync);
1113 1114
}

1115
static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
1116
{
1117
	unsigned int index;
1118

1119 1120
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1121
		return;
1122
	if (sp->unsync_children++)
1123
		return;
1124
	kvm_mmu_mark_parents_unsync(sp);
1125 1126
}

1127 1128 1129 1130 1131 1132 1133 1134 1135
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

1136
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1137
			       struct kvm_mmu_page *sp, bool clear_unsync)
1138 1139 1140 1141
{
	return 1;
}

M
Marcelo Tosatti 已提交
1142 1143 1144 1145
static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

1156 1157 1158 1159 1160
#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))

1161 1162
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
1163
{
1164
	int i;
1165

1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180
	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;
1181

1182
	for_each_unsync_children(sp->unsync_child_bitmap, i) {
1183
		struct kvm_mmu_page *child;
1184 1185
		u64 ent = sp->spt[i];

1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214
		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_child_bitmap;

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret)
				goto clear_child_bitmap;
			else if (ret > 0)
				nr_unsync_leaf += ret;
			else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			 goto clear_child_bitmap;

		continue;

clear_child_bitmap:
		__clear_bit(i, sp->unsync_child_bitmap);
		sp->unsync_children--;
		WARN_ON((int)sp->unsync_children < 0);
1215 1216 1217
	}


1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
	return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);
1229 1230 1231 1232 1233
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
1234
	trace_kvm_mmu_sync_page(sp);
1235 1236 1237 1238
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

1239 1240 1241 1242
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);
1243

1244 1245
#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
  hlist_for_each_entry(sp, pos,						\
1246 1247 1248
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
	if ((sp)->gfn != (gfn)) {} else

1249 1250
#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
  hlist_for_each_entry(sp, pos,						\
1251 1252 1253 1254
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
			(sp)->role.invalid) {} else

1255
/* @sp->gfn should be write-protected at the call site */
1256
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1257
			   struct list_head *invalid_list, bool clear_unsync)
1258
{
1259
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1260
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1261 1262 1263
		return 1;
	}

1264
	if (clear_unsync)
1265 1266
		kvm_unlink_unsync_page(vcpu->kvm, sp);

1267
	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
1268
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1269 1270 1271 1272 1273 1274 1275
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

1276 1277 1278
static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_page *sp)
{
1279
	LIST_HEAD(invalid_list);
1280 1281
	int ret;

1282
	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1283
	if (ret)
1284 1285
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

1286 1287 1288
	return ret;
}

1289 1290
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
1291
{
1292
	return __kvm_sync_page(vcpu, sp, invalid_list, true);
1293 1294
}

1295 1296 1297 1298
/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
{
	struct kvm_mmu_page *s;
1299
	struct hlist_node *node;
1300
	LIST_HEAD(invalid_list);
1301 1302
	bool flush = false;

1303
	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1304
		if (!s->unsync)
1305 1306 1307 1308
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1309
			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
1310
			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1311 1312 1313 1314 1315 1316
			continue;
		}
		kvm_unlink_unsync_page(vcpu->kvm, s);
		flush = true;
	}

1317
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1318 1319 1320 1321
	if (flush)
		kvm_mmu_flush_tlb(vcpu);
}

1322 1323 1324
struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];
1325 1326
};

1327 1328 1329 1330 1331 1332
#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_next(&pvec, &parents, -1),	\
			sp = pvec.page[i].sp;			\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

1333 1334 1335
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;
			return n;
		}

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;
	}

	return n;
}

1354
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1355
{
1356 1357 1358 1359 1360
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
1361

1362 1363 1364 1365 1366 1367 1368 1369 1370
		sp = parents->parent[level];
		if (!sp)
			return;

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
		level++;
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1371 1372
}

1373 1374 1375
static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
1376
{
1377 1378 1379
	parents->parent[parent->role.level-1] = NULL;
	pvec->nr = 0;
}
1380

1381 1382 1383 1384 1385 1386 1387
static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
1388
	LIST_HEAD(invalid_list);
1389 1390 1391

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
1392 1393 1394 1395 1396 1397 1398 1399
		int protected = 0;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

1400
		for_each_sp(pages, sp, parents, i) {
1401
			kvm_sync_page(vcpu, sp, &invalid_list);
1402 1403
			mmu_pages_clear_parents(&parents);
		}
1404
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1405
		cond_resched_lock(&vcpu->kvm->mmu_lock);
1406 1407
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
1408 1409
}

1410 1411 1412 1413
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
1414
					     int direct,
1415
					     unsigned access,
1416
					     u64 *parent_pte)
1417 1418 1419
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
1420
	struct kvm_mmu_page *sp;
1421
	struct hlist_node *node;
1422
	bool need_sync = false;
1423

1424
	role = vcpu->arch.mmu.base_role;
1425
	role.level = level;
1426
	role.direct = direct;
1427
	if (role.direct)
1428
		role.cr4_pae = 0;
1429
	role.access = access;
1430
	if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1431 1432 1433 1434
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
1435
	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
1436 1437
		if (!need_sync && sp->unsync)
			need_sync = true;
1438

1439 1440
		if (sp->role.word != role.word)
			continue;
1441

1442 1443
		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
			break;
1444

1445 1446
		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
		if (sp->unsync_children) {
1447
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1448 1449 1450
			kvm_mmu_mark_parents_unsync(sp);
		} else if (sp->unsync)
			kvm_mmu_mark_parents_unsync(sp);
1451

1452 1453 1454
		trace_kvm_mmu_get_page(sp, false);
		return sp;
	}
A
Avi Kivity 已提交
1455
	++vcpu->kvm->stat.mmu_cache_miss;
1456
	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
1457 1458 1459 1460
	if (!sp)
		return sp;
	sp->gfn = gfn;
	sp->role = role;
1461 1462
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
1463
	if (!direct) {
1464 1465
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
1466 1467 1468
		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			kvm_sync_pages(vcpu, gfn);

1469 1470
		account_shadowed(vcpu->kvm, gfn);
	}
1471 1472 1473 1474
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
A
Avi Kivity 已提交
1475
	trace_kvm_mmu_get_page(sp, true);
1476
	return sp;
1477 1478
}

1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;
	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;
1499 1500 1501 1502 1503

	if (iterator->level == PT_PAGE_TABLE_LEVEL)
		if (is_large_pte(*iterator->sptep))
			return false;

1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

1515 1516 1517 1518 1519 1520 1521
static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
	u64 spte;

	spte = __pa(sp->spt)
		| PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
1522
	__set_spte(sptep, spte);
1523 1524
}

1525 1526 1527 1528 1529 1530 1531 1532
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed form clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		mmu_page_remove_parent_pte(child, sptep);
		__set_spte(sptep, shadow_trap_nonpresent_pte);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

1556
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1557
					 struct kvm_mmu_page *sp)
1558
{
1559 1560 1561 1562
	unsigned i;
	u64 *pt;
	u64 ent;

1563
	pt = sp->spt;
1564 1565 1566 1567

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

M
Marcelo Tosatti 已提交
1568
		if (is_shadow_present_pte(ent)) {
1569
			if (!is_last_spte(ent, sp->role.level)) {
M
Marcelo Tosatti 已提交
1570 1571 1572 1573
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
1574 1575
				if (is_large_pte(ent))
					--kvm->stat.lpages;
A
Avi Kivity 已提交
1576 1577
				drop_spte(kvm, &pt[i],
					  shadow_trap_nonpresent_pte);
M
Marcelo Tosatti 已提交
1578 1579
			}
		}
1580
		pt[i] = shadow_trap_nonpresent_pte;
1581
	}
1582 1583
}

1584
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1585
{
1586
	mmu_page_remove_parent_pte(sp, parent_pte);
1587 1588
}

1589 1590 1591
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;
1592
	struct kvm_vcpu *vcpu;
1593

1594 1595
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.last_pte_updated = NULL;
1596 1597
}

1598
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1599 1600 1601
{
	u64 *parent_pte;

1602 1603 1604
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
1605 1606 1607
		else {
			struct kvm_pte_chain *chain;

1608
			chain = container_of(sp->parent_ptes.first,
1609 1610 1611
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
1612
		BUG_ON(!parent_pte);
1613
		kvm_mmu_put_page(sp, parent_pte);
A
Avi Kivity 已提交
1614
		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
1615
	}
1616 1617
}

1618
static int mmu_zap_unsync_children(struct kvm *kvm,
1619 1620
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
1621
{
1622 1623 1624
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
1625

1626
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1627
		return 0;
1628 1629 1630 1631 1632 1633

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
1634
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1635
			mmu_pages_clear_parents(&parents);
1636
			zapped++;
1637 1638 1639 1640 1641
		}
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
1642 1643
}

1644 1645
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
1646
{
1647
	int ret;
A
Avi Kivity 已提交
1648

1649
	trace_kvm_mmu_prepare_zap_page(sp);
1650
	++kvm->stat.mmu_shadow_zapped;
1651
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1652
	kvm_mmu_page_unlink_children(kvm, sp);
1653
	kvm_mmu_unlink_parents(kvm, sp);
1654
	if (!sp->role.invalid && !sp->role.direct)
A
Avi Kivity 已提交
1655
		unaccount_shadowed(kvm, sp->gfn);
1656 1657
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
1658
	if (!sp->root_count) {
1659 1660
		/* Count self */
		ret++;
1661
		list_move(&sp->link, invalid_list);
1662
	} else {
A
Avi Kivity 已提交
1663
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
1664 1665
		kvm_reload_remote_mmus(kvm);
	}
1666 1667

	sp->role.invalid = 1;
1668
	kvm_mmu_reset_last_pte_updated(kvm);
1669
	return ret;
1670 1671
}

1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(invalid_list))
		return;

	kvm_flush_remote_tlbs(kvm);

	do {
		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(kvm, sp);
	} while (!list_empty(invalid_list));

}

1690 1691 1692 1693 1694 1695
/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
1696
	int used_pages;
1697
	LIST_HEAD(invalid_list);
1698

1699
	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
1700 1701
	used_pages = max(0, used_pages);

1702 1703 1704 1705 1706 1707
	/*
	 * If we set the number of mmu pages to be smaller be than the
	 * number of actived pages , we must to free some mmu pages before we
	 * change the value
	 */

1708
	if (used_pages > kvm_nr_mmu_pages) {
1709 1710
		while (used_pages > kvm_nr_mmu_pages &&
			!list_empty(&kvm->arch.active_mmu_pages)) {
1711 1712
			struct kvm_mmu_page *page;

1713
			page = container_of(kvm->arch.active_mmu_pages.prev,
1714
					    struct kvm_mmu_page, link);
1715 1716
			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
							       &invalid_list);
1717
		}
1718
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
1719
		kvm_nr_mmu_pages = used_pages;
1720
		kvm->arch.n_free_mmu_pages = 0;
1721 1722
	}
	else
1723
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1724
					 - kvm->arch.n_max_mmu_pages;
1725

1726
	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
1727 1728
}

1729
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1730
{
1731
	struct kvm_mmu_page *sp;
1732
	struct hlist_node *node;
1733
	LIST_HEAD(invalid_list);
1734 1735
	int r;

1736
	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1737
	r = 0;
1738 1739

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1740 1741 1742
		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
1743
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1744
	}
1745
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
1746
	return r;
1747 1748
}

1749
static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1750
{
1751
	struct kvm_mmu_page *sp;
1752
	struct hlist_node *node;
1753
	LIST_HEAD(invalid_list);
1754

1755
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1756 1757
		pgprintk("%s: zap %lx %x\n",
			 __func__, gfn, sp->role.word);
1758
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1759
	}
1760
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
1761 1762
}

1763
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
A
Avi Kivity 已提交
1764
{
1765
	int slot = memslot_id(kvm, gfn);
1766
	struct kvm_mmu_page *sp = page_header(__pa(pte));
A
Avi Kivity 已提交
1767

1768
	__set_bit(slot, sp->slot_bitmap);
A
Avi Kivity 已提交
1769 1770
}

1771 1772 1773 1774 1775 1776 1777 1778 1779 1780
static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
A
Avi Kivity 已提交
1781
			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
1782 1783 1784
	}
}

1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877
/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive end, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

1878
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1879 1880 1881 1882 1883 1884 1885 1886 1887
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
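
/*
 * Illustrative use (a sketch, not taken from this file): a caller that needs
 * an effective memory type for a guest frame, e.g. when building hardware
 * page table attributes, might do:
 *
 *	u8 type = kvm_get_guest_memory_type(vcpu, gfn);
 *
 * and combine 'type' with host-side constraints.  The 0xFE/0xFF cases
 * (range straddle / MTRRs disabled) have already been folded to
 * MTRR_TYPE_WRBACK above, so callers always see a valid MTRR type.
 */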

static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
	mmu_convert_notrap(sp);
}

static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (s->unsync)
			continue;
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, s);
	}
}

static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	bool need_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (!can_unsync)
			return 1;

		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!need_unsync && !s->unsync) {
			if (!oos_shadow)
				return 1;
			need_unsync = true;
		}
	}
	if (need_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}
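
/*
 * Example of the trade-off made above (descriptive, scenario illustrative):
 * when the guest rewrites a last-level page table that is currently shadowed,
 * we can either write-protect the gfn (return 1, every guest write traps) or
 * leave it writable and mark the shadow pages unsync, deferring the resync to
 * the next guest TLB flush.  Higher-level tables and the oos_shadow=0 case
 * always take the write-protect path.
 */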

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool reset_host_protection)
{
	u64 spte;
	int ret = 0;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (reset_host_protection)
		spte |= SPTE_HOST_WRITEABLE;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
		&& !user_fault)) {

		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
			goto done;
		}

		spte |= PT_WRITABLE_MASK;

		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
			spte &= ~PT_USER_MASK;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	update_spte(sptep, spte);
done:
	return ret;
}
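
/*
 * Return convention of set_spte() (summarised from the code above): 0 means
 * the spte was installed as requested; 1 means the mapping had to be
 * write-protected or dropped (an oversized mapping over a write-protected
 * gfn, or mmu_need_write_protect() asked for protection), so the caller
 * reports a write fault and/or flushes the TLB as needed.
 */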

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative,
			 bool reset_host_protection)
{
	int was_rmapped = 0;
	int rmap_count;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else
			was_rmapped = 1;
	}

	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
		      dirty, level, gfn, pfn, speculative, true,
		      reset_host_protection)) {
		if (write_fault)
			*ptwrite = 1;
		kvm_mmu_flush_tlb(vcpu);
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 is_large_pte(*sptep)? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
		 *sptep, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, sptep, gfn);
	if (!was_rmapped) {
		rmap_count = rmap_add(vcpu, sptep, gfn);
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
			rmap_recycle(vcpu, sptep, gfn);
	}
	kvm_release_pfn_clean(pfn);
	if (speculative) {
		vcpu->arch.last_pte_updated = sptep;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int level, gfn_t gfn, pfn_t pfn)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int pt_write = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write,
				     level, gfn, pfn, false, true);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);
			if (!sp) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			__set_spte(iterator.sptep,
				   __pa(sp->spt)
				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
				   | shadow_user_mask | shadow_x_mask);
		}
	}
	return pt_write;
}
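
/*
 * Shape of the walk above for a 4KB mapping under a 4-level shadow tree
 * (illustrative): levels 4, 3 and 2 either follow an existing spte or
 * allocate an intermediate shadow page with kvm_mmu_get_page() and link it
 * in; at 'level' (here 1) the walk stops and mmu_set_spte() installs the
 * leaf entry that points at 'pfn'.
 */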

static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn)
{
	char buf[1];
	void __user *hva;
	int r;

	/* Touch the page, so send SIGBUS */
	hva = (void __user *)gfn_to_hva(kvm, gfn);
	r = copy_from_user(buf, hva, 1);
}

static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
{
	kvm_release_pfn_clean(pfn);
	if (is_hwpoison_pfn(pfn)) {
		kvm_send_hwpoison_signal(kvm, gfn);
		return 0;
	} else if (is_fault_pfn(pfn))
		return -EFAULT;

	return 1;
}
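
/*
 * Return values (summarised from the code above): 0 - the fault was handled
 * by signalling the hwpoison to userspace; -EFAULT - the pfn is a "fault"
 * pfn and cannot be fixed up; 1 - not actually a bad page from KVM's point
 * of view, so kvm_mmu_page_fault() falls back to emulation as for MMIO.
 */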

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int level;
	pfn_t pfn;
	unsigned long mmu_seq;

	level = mapping_level(vcpu, gfn);

	/*
	 * This path builds a PAE pagetable - so we can map 2mb pages at
	 * maximum. Therefore check if the level is larger than that.
	 */
	if (level > PT_DIRECTORY_LEVEL)
		level = PT_DIRECTORY_LEVEL;

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);


	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}


static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2248 2249
{
	int i;
2250
	gfn_t root_gfn;
2251
	struct kvm_mmu_page *sp;
2252
	int direct = 0;
	u64 pdptr;
2254

2255
	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
2256

2257 2258
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
2259 2260

		ASSERT(!VALID_PAGE(root));
2261 2262
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
2263 2264 2265 2266
		if (tdp_enabled) {
			direct = 1;
			root_gfn = 0;
		}
2267
		spin_lock(&vcpu->kvm->mmu_lock);
2268
		kvm_mmu_free_some_pages(vcpu);
2269
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
2270
				      PT64_ROOT_LEVEL, direct,
2271
				      ACC_ALL, NULL);
2272 2273
		root = __pa(sp->spt);
		++sp->root_count;
2274
		spin_unlock(&vcpu->kvm->mmu_lock);
2275
		vcpu->arch.mmu.root_hpa = root;
2276
		return 0;
2277
	}
2278
	direct = !is_paging(vcpu);
2279
	for (i = 0; i < 4; ++i) {
2280
		hpa_t root = vcpu->arch.mmu.pae_root[i];
2281 2282

		ASSERT(!VALID_PAGE(root));
2283
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = kvm_pdptr_read(vcpu, i);
2285
			if (!is_present_gpte(pdptr)) {
2286
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
2290
		} else if (vcpu->arch.mmu.root_level == 0)
2291
			root_gfn = 0;
2292 2293
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
2294 2295 2296 2297
		if (tdp_enabled) {
			direct = 1;
			root_gfn = i << 30;
		}
2298
		spin_lock(&vcpu->kvm->mmu_lock);
2299
		kvm_mmu_free_some_pages(vcpu);
2300
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2301
				      PT32_ROOT_LEVEL, direct,
2302
				      ACC_ALL, NULL);
2303 2304
		root = __pa(sp->spt);
		++sp->root_count;
2305 2306
		spin_unlock(&vcpu->kvm->mmu_lock);

2307
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2308
	}
2309
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2310
	return 0;
2311 2312
}
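
/*
 * Root layout produced above (descriptive): with a 4-level shadow MMU a
 * single root shadow page backs root_hpa; in PAE/32-bit mode pae_root[0..3]
 * each hold a present entry covering 1GB of guest address space (hence the
 * "i << 30" pseudo-gfn used for tdp), and root_hpa points at the pae_root
 * array itself.
 */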

static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

2329
		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
2341
	spin_unlock(&vcpu->kvm->mmu_lock);
2342 2343
}

2344 2345
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
				  u32 access, u32 *error)
{
2347 2348
	if (error)
		*error = 0;
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
2355
	gfn_t gfn;
2356
	int r;

2358
	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2359 2360 2361
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
2362

	ASSERT(vcpu);
2364
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

2366
	gfn = gva >> PAGE_SHIFT;

2368 2369
	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

2372 2373 2374
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
2375
	pfn_t pfn;
2376
	int r;
2377
	int level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
2379
	unsigned long mmu_seq;
2380 2381 2382 2383 2384 2385 2386 2387

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

2388 2389 2390 2391
	level = mapping_level(vcpu, gfn);

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

2392
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
2393
	smp_rmb();
2394
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
2395 2396
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2397
	spin_lock(&vcpu->kvm->mmu_lock);
2398 2399
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
2400 2401
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2402
			 level, gfn, pfn);
2403 2404 2405
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;
2406 2407 2408 2409 2410

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
2411 2412
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
2415
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
2420
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
2426
	context->prefetch_page = nonpaging_prefetch_page;
2427
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
2429
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

2435
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
2438
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
2443
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2444
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
2451
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

2459 2460 2461 2462 2463 2464 2465 2466
static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!is_nx(vcpu))
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
2488 2489 2490 2491 2492 2493 2494
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

2495 2496 2497 2498 2499 2500 2501 2502
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
2503 2504 2505
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
2506
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2507
			rsvd_bits(maxphyaddr, 62);	/* PDE */
2508 2509 2510 2511 2512
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62); 	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
2513
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2514 2515 2516 2517 2518 2519 2520
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2521
			rsvd_bits(maxphyaddr, 51);
2522 2523 2524
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2525 2526 2527
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
2528
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2529 2530
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
2531
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2532 2533 2534 2535
		break;
	}
}
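
/*
 * Worked example (illustrative): with maxphyaddr == 36 and NX disabled, a
 * 4-level guest gets bits 51:36 plus bit 63 reserved in every entry, bits
 * 8:7 reserved in the PML4E/PDPTE, and additionally bits 20:13 (2MB) or
 * 29:13 (1GB) reserved in large-page entries; a guest entry with any of
 * these bits set trips is_rsvd_bits_set() in the page table walker.
 */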

2536
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
2538
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
2544
	context->prefetch_page = paging64_prefetch_page;
2545
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
2548 2549
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

2554 2555
static int paging64_init_context(struct kvm_vcpu *vcpu)
{
2556
	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2557 2558 2559
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
2562
	struct kvm_mmu *context = &vcpu->arch.mmu;

2564
	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
2569
	context->prefetch_page = paging32_prefetch_page;
2570
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
2580
	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2581
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

2584 2585 2586 2587 2588 2589 2590 2591
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
2592
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
2594
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2595 2596 2597 2598 2599 2600
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
2601
		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2602 2603 2604
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
2605
		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2606 2607 2608
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
2609
		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2610 2611 2612 2613 2614 2615 2616 2617
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
2619 2620
	int r;

	ASSERT(vcpu);
2622
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
2625
		r = nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
2627
		r = paging64_init_context(vcpu);
	else if (is_pae(vcpu))
2629
		r = paging32E_init_context(vcpu);
	else
2631 2632
		r = paging32_init_context(vcpu);

2633
	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
2634
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
2635 2636

	return r;
}

2639 2640
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
2641 2642
	vcpu->arch.update_pte.pfn = bad_pfn;

2643 2644 2645 2646 2647 2648
	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
2652 2653
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
2654
		vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
2662
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
2666 2667
	int r;

2668
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
2671
	r = mmu_alloc_roots(vcpu);
2672
	spin_lock(&vcpu->kvm->mmu_lock);
2673
	mmu_sync_roots(vcpu);
2674
	spin_unlock(&vcpu->kvm->mmu_lock);
2675 2676
	if (r)
		goto out;
2677
	/* set_cr3() should ensure TLB has been flushed */
2678
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2679 2680
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

2689
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2690
				  struct kvm_mmu_page *sp,
2691 2692 2693 2694 2695 2696
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
2697
	if (is_shadow_present_pte(pte)) {
2698
		if (is_last_spte(pte, sp->role.level))
			drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
2700 2701
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2702
			mmu_page_remove_parent_pte(child, spte);
2703 2704
		}
	}
	__set_spte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
2708 2709
}

2710
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2711
				  struct kvm_mmu_page *sp,
2712
				  u64 *spte,
2713
				  const void *new)
2714
{
2715
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2716 2717
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
2718
        }
2719

2720 2721 2722
	if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
		return;

	++vcpu->kvm->stat.mmu_pte_updated;
2724
	if (!sp->role.cr4_pae)
2725
		paging32_update_pte(vcpu, sp, spte, new);
2726
	else
2727
		paging64_update_pte(vcpu, sp, spte, new);
2728 2729
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
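
/*
 * Examples (descriptive): swapping the frame a present spte points at, or
 * removing a permission bit (e.g. clearing W, or newly setting NX), requires
 * the other vcpus' TLBs to be flushed; going from not-present to present, or
 * only adding permissions, does not.
 */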

2743 2744
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
2745
{
2746 2747 2748 2749
	if (zap_page)
		return;

	if (remote_flush)
2750
		kvm_flush_remote_tlbs(vcpu->kvm);
2751
	else if (local_flush)
2752 2753 2754
		kvm_mmu_flush_tlb(vcpu);
}

2755 2756
static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
2757
	u64 *spte = vcpu->arch.last_pte_updated;
2758

	return !!(spte && (*spte & shadow_accessed_mask));
2760 2761
}

2762
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2763
					  u64 gpte)
2764 2765
{
	gfn_t gfn;
2766
	pfn_t pfn;
2767

2768
	if (!is_present_gpte(gpte))
2769 2770
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2771

2772
	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2773
	smp_rmb();
2774
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
2775

2776 2777
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
2778 2779
		return;
	}
2780
	vcpu->arch.update_pte.gfn = gfn;
2781
	vcpu->arch.update_pte.pfn = pfn;
2782 2783
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

2796
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2797 2798
		       const u8 *new, int bytes,
		       bool guest_initiated)
2799
{
2800
	gfn_t gfn = gpa >> PAGE_SHIFT;
2801
	union kvm_mmu_page_role mask = { .word = 0 };
2802
	struct kvm_mmu_page *sp;
2803
	struct hlist_node *node;
2804
	LIST_HEAD(invalid_list);
2805
	u64 entry, gentry;
2806 2807
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
2808
	unsigned pte_size;
2809
	unsigned page_offset;
2810
	unsigned misaligned;
2811
	unsigned quadrant;
2812
	int level;
2813
	int flooded = 0;
2814
	int npte;
2815
	int r;
2816
	int invlpg_counter;
2817 2818 2819
	bool remote_flush, local_flush, zap_page;

	zap_page = remote_flush = local_flush = false;
2820

2821
	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2822

2823
	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
2824 2825 2826 2827 2828 2829 2830

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu's paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
2831
	if ((is_pae(vcpu) && bytes == 4) || !new) {
2832
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2833 2834 2835 2836 2837
		if (is_pae(vcpu)) {
			gpa &= ~(gpa_t)7;
			bytes = 8;
		}
		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
2838 2839
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
2853 2854 2855
	}

	mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
2856
	spin_lock(&vcpu->kvm->mmu_lock);
2857 2858
	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
		gentry = 0;
2859
	kvm_mmu_access_page(vcpu, gfn);
2860
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
2862
	kvm_mmu_audit(vcpu, "pre pte write");
	if (guest_initiated) {
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
2874
	}
2875

2876
	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
2877
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
2878
		pte_size = sp->role.cr4_pae ? 8 : 4;
2879
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2880
		misaligned |= bytes < 4;
2881
		if (misaligned || flooded) {
2882 2883 2884 2885
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
2886 2887 2888 2889 2890
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
2891 2892
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2893
				 gpa, bytes, sp->role.word);
2894
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2895
						     &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
2897 2898
			continue;
		}
2899
		page_offset = offset;
2900
		level = sp->role.level;
2901
		npte = 1;
2902
		if (!sp->role.cr4_pae) {
2903 2904 2905 2906 2907 2908 2909
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
2910
				page_offset &= ~7; /* kill rounding error */
2911 2912 2913
				page_offset <<= 1;
				npte = 2;
			}
2914
			quadrant = page_offset >> PAGE_SHIFT;
2915
			page_offset &= ~PAGE_MASK;
2916
			if (quadrant != sp->role.quadrant)
2917
				continue;
2918
		}
2919
		local_flush = true;
2920
		spte = &sp->spt[page_offset / sizeof(*spte)];
2921
		while (npte--) {
2922
			entry = *spte;
2923
			mmu_pte_write_zap_pte(vcpu, sp, spte);
2924 2925 2926
			if (gentry &&
			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
			      & mask.word))
2927
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
2928 2929
			if (!remote_flush && need_remote_flush(entry, *spte))
				remote_flush = true;
2930
			++spte;
2931 2932
		}
	}
2933
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
2934
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2935
	kvm_mmu_audit(vcpu, "post pte write");
2936
	spin_unlock(&vcpu->kvm->mmu_lock);
2937 2938 2939
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
2940
	}
2941 2942
}

2943 2944
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
2945 2946
	gpa_t gpa;
	int r;
2947

2948 2949 2950
	if (tdp_enabled)
		return 0;

2951
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2952

2953
	spin_lock(&vcpu->kvm->mmu_lock);
2954
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2955
	spin_unlock(&vcpu->kvm->mmu_lock);
2956
	return r;
2957
}
2958
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2959

2960
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
2962
	LIST_HEAD(invalid_list);
2963

2964
	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
2965
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2966
		struct kvm_mmu_page *sp;

2968
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2969
				  struct kvm_mmu_page, link);
2970
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
		++vcpu->kvm->stat.mmu_recycled;
	}
2973
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}

2976 2977 2978 2979 2980
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

2981
	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2982 2983 2984 2985 2986 2987 2988 2989
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

2990 2991 2992 2993
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, cr2, error_code, 0);
2995 2996 2997 2998 2999 3000

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
3001
		/* fall through */
3002
	case EMULATE_FAIL:
3003
		return 0;
3004 3005 3006 3007 3008 3009 3010 3011
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

3020 3021 3022 3023 3024 3025
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

3026 3027 3028 3029 3030 3031
void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
3034
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
3039
	struct page *page;
	int i;

	ASSERT(vcpu);

3044 3045 3046 3047 3048 3049 3050
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
3051 3052
		return -ENOMEM;

3053
	vcpu->arch.mmu.pae_root = page_address(page);
3054
	for (i = 0; i < 4; ++i)
3055
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3056

	return 0;
}

3060
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
3063
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

3065 3066
	return alloc_mmu_pages(vcpu);
}

3068 3069 3070
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
3071
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3072

3073
	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
3082
	mmu_free_memory_caches(vcpu);
}

3085
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
3087
	struct kvm_mmu_page *sp;

3089
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

3093
		if (!test_bit(slot, sp->slot_bitmap))
			continue;

3096
		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
3099
			if (is_writable_pte(pt[i]))
				pt[i] &= ~PT_WRITABLE_MASK;
	}
3102
	kvm_flush_remote_tlbs(kvm);
}
3104

3105
void kvm_mmu_zap_all(struct kvm *kvm)
{
3107
	struct kvm_mmu_page *sp, *node;
3108
	LIST_HEAD(invalid_list);

3110
	spin_lock(&kvm->mmu_lock);
3111
restart:
3112
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
3113
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
3114 3115
			goto restart;

3116
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3117
	spin_unlock(&kvm->mmu_lock);
}

3120 3121
static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
					       struct list_head *invalid_list)
3122 3123 3124 3125 3126
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
3127
	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3128 3129
}

3130
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3131 3132 3133 3134 3135 3136 3137 3138
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int npages, idx, freed_pages;
3140
		LIST_HEAD(invalid_list);
3141

3142
		idx = srcu_read_lock(&kvm->srcu);
3143
		spin_lock(&kvm->mmu_lock);
3144
		npages = kvm->arch.n_max_mmu_pages -
3145
			 kvm_mmu_available_pages(kvm);
3146 3147
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
3148 3149
			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
							  &invalid_list);
			cache_count -= freed_pages;
3151 3152 3153 3154
			kvm_freed = kvm;
		}
		nr_to_scan--;

3155
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
3156
		spin_unlock(&kvm->mmu_lock);
3157
		srcu_read_unlock(&kvm->srcu, idx);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
3173 3174 3175 3176 3177
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
3178 3179
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
3180 3181
}

3182 3183 3184 3185 3186 3187
void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}

3188 3189 3190 3191
int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
3192
					    0, 0, NULL);
3193 3194 3195 3196
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
3197
					    0, 0, NULL);
3198 3199 3200
	if (!rmap_desc_cache)
		goto nomem;

3201 3202
	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
3203
						  0, 0, NULL);
3204 3205 3206
	if (!mmu_page_header_cache)
		goto nomem;

3207 3208
	register_shrinker(&mmu_shrinker);

3209 3210 3211
	return 0;

nomem:
3212
	mmu_destroy_caches();
3213 3214 3215
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;
3224
	struct kvm_memslots *slots;
3225

3226 3227
	slots = kvm_memslots(kvm);

3228 3229
	for (i = 0; i < slots->nmemslots; i++)
		nr_pages += slots->memslots[i].npages;
3230 3231 3232 3233 3234 3235 3236 3237

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
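
/*
 * Example of the arithmetic (illustrative; KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64 at the time of writing): a guest with 1GB
 * of memory has roughly 262144 guest pages, so it is allowed about
 * 262144 * 20 / 1000 = 5242 shadow pages; very small guests are rounded up
 * to the minimum allocation instead.
 */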

static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			     gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

3273
	if (!emulator_write_phys(vcpu, addr, &value, bytes))
3274 3275 3276 3277 3278 3279 3280
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
3281
	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
3335
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3336

3337 3338 3339
	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;
3340

3341
	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3342 3343 3344
	if (r)
		goto out;

3345 3346
	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
3347 3348 3349 3350 3351 3352 3353 3354
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
3355
	*ret = buffer->processed;
3356 3357 3358
	return r;
}

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
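
/*
 * Illustrative caller (a sketch, not taken from this file): an exit handler
 * that wants to see how far the shadow walk got for a faulting guest
 * physical address could do
 *
 *	u64 sptes[4];
 *	int n = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
 *
 * and then print the n filled entries, starting from the top level, to
 * diagnose e.g. a misconfigured mapping.
 */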

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

3389

3390
typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);

static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
3401
			if (!is_last_spte(ent, sp->role.level)) {
3402 3403 3404
				struct kvm_mmu_page *child;
				child = page_header(ent & PT64_BASE_ADDR_MASK);
				__mmu_spte_walk(kvm, child, fn);
3405
			} else
3406
				fn(kvm, &sp->spt[i]);
		}
	}
}

static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		__mmu_spte_walk(vcpu->kvm, sp, fn);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu->kvm, sp, fn);
		}
	}
	return;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

3446
		if (ent == shadow_trap_nonpresent_pte)
3447 3448 3449
			continue;

		va = canonicalize(va);
3450 3451 3452
		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
3453
			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3457

3458 3459 3460 3461 3462
			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

3463
			if (is_shadow_present_pte(ent)
3464
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
3465 3466
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3467
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
3470 3471 3472 3473
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
3474
			kvm_release_pfn_clean(pfn);
3475

3476 3477 3478 3479 3480 3481
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
3482
	unsigned i;
3483

3484 3485
	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3486 3487
	else
		for (i = 0; i < 4; ++i)
3488
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3489
				audit_mappings_page(vcpu,
3490
						    vcpu->arch.mmu.pae_root[i],
3491 3492 3493 3494 3495 3496
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
3497 3498
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memslots *slots;
3499
	int nmaps = 0;
3500
	int i, j, k, idx;
3501

3502
	idx = srcu_read_lock(&kvm->srcu);
3503
	slots = kvm_memslots(kvm);
3504
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3505
		struct kvm_memory_slot *m = &slots->memslots[i];
3506 3507 3508
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
3509
			unsigned long *rmapp = &m->rmap[j];
3510

3511
			if (!*rmapp)
3512
				continue;
3513
			if (!(*rmapp & 1)) {
3514 3515 3516
				++nmaps;
				continue;
			}
3517
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3518 3519
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->sptes[k])
3521 3522 3523 3524 3525 3526 3527
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
3528
	srcu_read_unlock(&kvm->srcu, idx);
3529 3530 3531
	return nmaps;
}

3532
void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
3533 3534 3535 3536 3537
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

3538
	if (is_writable_pte(*sptep)) {
3539
		rev_sp = page_header(__pa(sptep));
3540
		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
3541 3542 3543 3544 3545 3546 3547

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %ld\n",
					 audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3548
			       audit_msg, (long int)(sptep - rev_sp->spt),
3549 3550 3551 3552 3553
					rev_sp->gfn);
			dump_stack();
			return;
		}

3554
		rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
					 audit_msg, *sptep);
			dump_stack();
		}
	}

}

void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3572
{
3573
	struct kvm_mmu_page *sp;
3574 3575
	int i;

3576
	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3577
		u64 *pt = sp->spt;
3578

3579
		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3580 3581 3582 3583 3584 3585 3586
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
3587
			if (!is_writable_pte(ent))
3588
				continue;
3589
			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
3590 3591
		}
	}
3592
	return;
3593 3594 3595 3596
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
3597 3598
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
3599 3600 3601 3602
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
3603
	struct kvm_mmu_page *sp;
3604 3605
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
3606
	u64 *spte;
3607
	gfn_t gfn;
3608

3609
	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3610
		if (sp->role.direct)
3611
			continue;
3612 3613
		if (sp->unsync)
			continue;
3614

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
3616
		rmapp = &slot->rmap[gfn - slot->base_gfn];
3617 3618 3619

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
3620
			if (is_writable_pte(*spte))
3621 3622
				printk(KERN_ERR "%s: (%s) shadow page has "
				"writable mappings: gfn %lx role %x\n",
3623
			       __func__, audit_msg, sp->gfn,
3624
			       sp->role.word);
3625 3626
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
3638 3639
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
3640
	audit_writable_sptes_have_rmaps(vcpu);
3641 3642 3643 3644
	dbg = olddbg;
}

#endif