/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
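/*
 * For example, with PAGE_SHIFT == 12 and PT64_LEVEL_BITS == 9,
 * PT64_LEVEL_SHIFT(level) evaluates to 12, 21, 30 and 39 for levels 1-4,
 * so PT64_INDEX() extracts the 9-bit table index used at each level of a
 * 64-bit page table walk.
 */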


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
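/*
 * A spte can be made writable locklessly only when both of the bits above
 * are set; see spte_can_locklessly_be_made_writable() below.
 */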

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))
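/*
 * Illustrative use of the lockless walker (a sketch, not called anywhere
 * in this file verbatim):
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *	u64 spte;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	walk_shadow_page_lockless_end(vcpu);
 */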

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_present_mask;

/*
 * The mask/value to distinguish a PTE that has been marked not-present for
 * access tracking purposes.
 * The mask would be either 0 if access tracking is disabled, or
 * SPTE_SPECIAL_MASK|VMX_EPT_RWX_MASK if access tracking is enabled.
 */
static u64 __read_mostly shadow_acc_track_mask;
static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

static void mmu_spte_set(u64 *sptep, u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static inline bool is_access_track_spte(u64 spte)
{
	/* Always false if shadow_acc_track_mask is zero.  */
	return (spte & shadow_acc_track_mask) == shadow_acc_track_value;
}

/*
 * the low bit of the generation number is always presumed to be zero.
 * This disables mmio caching during memslot updates.  The concept is
 * similar to a seqcount but instead of retrying the access we just punt
 * and ignore the cache.
 *
 * spte bits 3-11 are used as bits 1-9 of the generation number,
 * the bits 52-61 are used as bits 10-19 of the generation number.
 */
#define MMIO_SPTE_GEN_LOW_SHIFT		2
#define MMIO_SPTE_GEN_HIGH_SHIFT	52

#define MMIO_GEN_SHIFT			20
#define MMIO_GEN_LOW_SHIFT		10
#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)
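/*
 * Worked example of the encoding above: generation_mmio_spte_mask() places
 * (gen & MMIO_GEN_LOW_MASK) at spte bits 3-11 (shifted left by
 * MMIO_SPTE_GEN_LOW_SHIFT) and (gen >> MMIO_GEN_LOW_SHIFT) at spte bits
 * 52-61; get_mmio_spte_generation() below reverses the packing.
 */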

static u64 generation_mmio_spte_mask(unsigned int gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_GEN_MASK);

	mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

static unsigned int get_mmio_spte_generation(u64 spte)
{
	unsigned int gen;

	spte &= ~shadow_mmio_mask;

	gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
	gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
	return gen;
}

static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned access)
{
	unsigned int gen = kvm_current_mmio_generation(vcpu);
	u64 mask = generation_mmio_spte_mask(gen);

	access &= ACC_WRITE_MASK | ACC_USER_MASK;
	mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
}

static bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_mask;
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
	return (spte & ~mask) >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
	return (spte & ~mask) & ~PAGE_MASK;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	unsigned int kvm_gen, spte_gen;

	kvm_gen = kvm_current_mmio_generation(vcpu);
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask)
{
	if (acc_track_mask != 0)
		acc_track_mask |= SPTE_SPECIAL_MASK;

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	WARN_ON(shadow_accessed_mask != 0 && shadow_acc_track_mask != 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

void kvm_mmu_clear_all_pte_masks(void)
{
	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return (pte != 0) && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return ACCESS_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first, then set the present bit, so the CPU cannot
	 * fetch this spte while we are setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first to avoid the vcpu fetching the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to get the spte on x86_32 guests
 * comes from gup_get_pte(arch/x86/mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always update the spte atomically if it can be updated
	 * outside of mmu-lock: this ensures the dirty bit is not lost,
	 * and it helps us to get a stable is_writable_pte()
	 * to ensure a tlb flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (shadow_accessed_mask) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

static bool is_accessed_spte(u64 spte)
{
	return shadow_accessed_mask ? spte & shadow_accessed_mask
				    : !is_access_track_spte(spte);
}

static bool is_dirty_spte(u64 spte)
{
	return shadow_dirty_mask ? spte & shadow_dirty_mask
				 : spte & PT_WRITABLE_MASK;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte outside of mmu-lock is safe, since
	 * we always update it atomically; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush TLB when accessed/dirty states are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold the refcount of the page used by
	 * the kvm mmu; before reclaiming the page, we should
	 * unmap it from the mmu first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

static u64 mark_spte_for_access_track(u64 spte)
{
	if (shadow_accessed_mask != 0)
		return spte & ~shadow_accessed_mask;

	if (shadow_acc_track_mask == 0 || is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE. So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
			  shadow_acc_track_saved_bits_shift),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
		shadow_acc_track_saved_bits_shift;
	spte &= ~shadow_acc_track_mask;
	spte |= shadow_acc_track_value;

	return spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (shadow_accessed_mask) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
	return cache->nobjs;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	void *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* Non-leaf shadow pages keep their shadowed gfn write-protected (read-only). */
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
					  struct kvm_memory_slot *slot)
{
	struct kvm_lpage_info *linfo;

	if (slot) {
		linfo = lpage_info_slot(gfn, slot, level);
		return !!linfo->disallow_lpage;
	}

	return true;
}

static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
					int level)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
					  bool no_dirty_log)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return false;
	if (no_dirty_log && slot->dirty_bitmap)
		return false;

	return true;
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!memslot_valid_for_gpte(slot, no_dirty_log))
		slot = NULL;

	return slot;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
			 bool *force_pt_level)
{
	int host_level, level, max_level;
	struct kvm_memory_slot *slot;

	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
	*force_pt_level = !memslot_valid_for_gpte(slot, true);
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
			break;

	return level - 1;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */
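/*
 * For example, the first spte that maps a gfn is stored directly in
 * rmap_head->val; when a second spte is added, pte_list_add() allocates a
 * pte_list_desc, moves the first spte into desc->sptes[0], stores the new
 * spte in desc->sptes[1] and sets rmap_head->val = (unsigned long)desc | 1.
 */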

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!rmap_head->val) {
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			desc = desc->more;
			count += PTE_LIST_EXT;
		}
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		rmap_head->val = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_remove:  %p 1->0\n", spte);
		if ((u64 *)rmap_head->val != spte) {
			printk(KERN_ERR "pte_list_remove:  %p 1->BUG\n", spte);
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("pte_list_remove:  %p many->many\n", spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("pte_list_remove: %p many->many\n", spte);
		BUG();
	}
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					 struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(gfn, sp->role.level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache;

	cache = &vcpu->arch.mmu_pte_list_desc_cache;
	return mmu_memory_cache_free_objects(cache);
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
	pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	u64 *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		sptep = (u64 *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	u64 *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	if (mmu_spte_clear_track_bits(sptep))
		rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(page_header(__pa(sptep))->role.level ==
			PT_PAGE_TABLE_LEVEL);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
}

/*
 * Write-protect on the specified @sptep, @pt_protect indicates whether
 * spte write-protection is caused by protecting shadow page table.
 *
 * Note: write protection differs between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
 *
 * Return true if the tlb needs to be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

	spte &= ~shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_clear_dirty(sptep);

	return flush;
}

static bool spte_set_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_set_dirty(sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	while ((sptep = rmap_get_first(rmap_head, &iter))) {
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);

		drop_spte(kvm, sptep);
		flush = true;
	}

	return flush;
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}

static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			drop_spte(kvm, sptep);
			goto restart;
		} else {
			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;

			new_spte = mark_spte_for_access_track(new_spte);

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
					       struct kvm_rmap_head *rmap_head,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn,
					       int level,
					       unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
						 PT_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
	}

	return ret;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn, int level,
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator uninitialized_var(iter);
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;

	/*
	 * If there's no access bit in the secondary pte set by the hardware and
	 * fast access tracking is also not enabled, it's up to gup-fast/gup to
	 * set the access bit in the primary pte or in the page structure.
	 */
	if (!shadow_accessed_mask && !shadow_acc_track_mask)
		goto out;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
out:
	return 0;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/*
	 * In the absence of EPT Access and Dirty Bits support,
	 * emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!shadow_accessed_mask && !shadow_acc_track_mask)
		return kvm_handle_hva_range(kvm, start, end, 0,
					    kvm_unmap_rmapp);

	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances's
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

1833
	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1834 1835
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * The active_mmu_pages list is a FIFO list; do not move the
	 * page until it is zapped. kvm_zap_obsolete_pages depends on
	 * this feature. See the comments in kvm_zap_obsolete_pages().
	 */
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = page_header(__pa(spte));
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,
				 const void *pte)
{
	WARN_ON(1);
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};
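/*
 * A kvm_mmu_pages vector is filled by mmu_unsync_walk() with the unsync
 * shadow pages reachable from a parent page, together with the index of
 * the parent entry through which each one was reached; mmu_sync_children()
 * then iterates over it with for_each_sp().
 */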

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

/*
 * NOTE: we should pay more attention to the zapped-obsolete page
 * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk,
 * since it has been deleted from active_mmu_pages but can still be
 * found in the hash list.
 *
 * for_each_valid_sp() has skipped that kind of page.
 */
#define for_each_valid_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

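/*
 * Example (illustrative only, mirroring kvm_sync_pages() below):
 *
 *	struct kvm_mmu_page *sp;
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 *		if (!sp->unsync)
 *			continue;
 *		...
 *	}
 */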
/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
{
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (!list_empty(invalid_list)) {
		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
		return;
	}

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

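/*
 * A shadow page is considered obsolete when its mmu_valid_gen no longer
 * matches kvm->arch.mmu_valid_gen, i.e. it predates the last bump of the
 * generation counter and is only waiting to be zapped.
 */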
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
}

/* @gfn should be write-protected at the call site */
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
{
	struct kvm_mmu_page *s;
	bool ret = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		ret |= kvm_sync_page(vcpu, s, invalid_list);
	}

	return ret;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
	unsigned int idx[PT64_ROOT_LEVEL];
};

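/*
 * mmu_page_path records, for each level of an unsync walk, the parent
 * shadow page and the index of the entry that was followed; it lets
 * mmu_pages_clear_parents() clear the unsync_child_bitmap bits on the
 * way back up.
 */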
#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PT_PAGE_TABLE_LEVEL);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	__clear_sp_write_flooding_count(sp);
}

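/*
 * The write flooding count is cleared whenever the shadow page is looked
 * up again through kvm_mmu_get_page() or reached by a lockless shadow walk
 * (see shadow_page_table_clear_flood() below), so a page starts counting
 * from zero once it is re-used.
 */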
2215 2216 2217 2218
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
2219
					     int direct,
2220
					     unsigned access)
2221 2222 2223
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
2224 2225
	struct kvm_mmu_page *sp;
	bool need_sync = false;
2226
	bool flush = false;
2227
	int collisions = 0;
2228
	LIST_HEAD(invalid_list);
2229

2230
	role = vcpu->arch.mmu.base_role;
2231
	role.level = level;
2232
	role.direct = direct;
2233
	if (role.direct)
2234
		role.cr4_pae = 0;
2235
	role.access = access;
2236 2237
	if (!vcpu->arch.mmu.direct_map
	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
2238 2239 2240 2241
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
2242 2243 2244 2245 2246 2247
	for_each_valid_sp(vcpu->kvm, sp, gfn) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

2248 2249
		if (!need_sync && sp->unsync)
			need_sync = true;
2250

2251 2252
		if (sp->role.word != role.word)
			continue;
2253

2254 2255 2256 2257 2258 2259 2260 2261 2262 2263
		if (sp->unsync) {
			/* The page is good, but __kvm_sync_page might still end
			 * up zapping it.  If so, break in order to rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
2264

2265
		if (sp->unsync_children)
2266
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2267

2268
		__clear_sp_write_flooding_count(sp);
2269
		trace_kvm_mmu_get_page(sp, false);
2270
		goto out;
2271
	}
2272

A
Avi Kivity 已提交
2273
	++vcpu->kvm->stat.mmu_cache_miss;
2274 2275 2276

	sp = kvm_mmu_alloc_page(vcpu, direct);

2277 2278
	sp->gfn = gfn;
	sp->role = role;
2279 2280
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
2281
	if (!direct) {
2282 2283 2284 2285 2286 2287 2288 2289
		/*
		 * we should do write protection before syncing pages
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PT_PAGE_TABLE_LEVEL &&
		      rmap_write_protect(vcpu, gfn))
2290
			kvm_flush_remote_tlbs(vcpu->kvm);
2291 2292

		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2293
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2294
	}
2295
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2296
	clear_page(sp->spt);
A
Avi Kivity 已提交
2297
	trace_kvm_mmu_get_page(sp, true);
2298 2299

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2300 2301 2302
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2303
	return sp;
2304 2305
}

2306 2307 2308 2309 2310 2311
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;
2312 2313 2314 2315 2316 2317

	if (iterator->level == PT64_ROOT_LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
	    !vcpu->arch.mmu.direct_map)
		--iterator->level;

2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331
	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;
2332

2333 2334 2335 2336 2337
	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

2338 2339
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
2340
{
2341
	if (is_last_spte(spte, iterator->level)) {
2342 2343 2344 2345
		iterator->level = 0;
		return;
	}

2346
	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2347 2348 2349
	--iterator->level;
}

2350 2351 2352 2353 2354
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	return __shadow_walk_next(iterator, *iterator->sptep);
}

2355 2356
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
2357 2358 2359
{
	u64 spte;

2360
	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2361

2362
	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2363
	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
X
Xiao Guangrong 已提交
2364

2365
	mmu_spte_set(sptep, spte);
2366 2367 2368 2369 2370

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
2371 2372
}

2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

2390
		drop_parent_pte(child, sptep);
2391 2392 2393 2394
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

X
Xiao Guangrong 已提交
2395
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2396 2397 2398 2399 2400 2401 2402
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
X
Xiao Guangrong 已提交
2403
		if (is_last_spte(pte, sp->role.level)) {
2404
			drop_spte(kvm, spte);
X
Xiao Guangrong 已提交
2405 2406 2407
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
2408
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2409
			drop_parent_pte(child, spte);
2410
		}
X
Xiao Guangrong 已提交
2411 2412 2413 2414
		return true;
	}

	if (is_mmio_spte(pte))
2415
		mmu_spte_clear_no_track(spte);
2416

X
Xiao Guangrong 已提交
2417
	return false;
2418 2419
}

2420
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2421
					 struct kvm_mmu_page *sp)
2422
{
2423 2424
	unsigned i;

2425 2426
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
2427 2428
}

2429
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2430
{
2431 2432
	u64 *sptep;
	struct rmap_iterator iter;
2433

2434
	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2435
		drop_parent_pte(sp, sptep);
2436 2437
}

2438
static int mmu_zap_unsync_children(struct kvm *kvm,
2439 2440
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
2441
{
2442 2443 2444
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2445

2446
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2447
		return 0;
2448 2449 2450 2451 2452

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
2453
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2454
			mmu_pages_clear_parents(&parents);
2455
			zapped++;
2456 2457 2458 2459
		}
	}

	return zapped;
2460 2461
}

2462 2463
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
2464
{
2465
	int ret;
A
Avi Kivity 已提交
2466

2467
	trace_kvm_mmu_prepare_zap_page(sp);
2468
	++kvm->stat.mmu_shadow_zapped;
2469
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2470
	kvm_mmu_page_unlink_children(kvm, sp);
2471
	kvm_mmu_unlink_parents(kvm, sp);
2472

2473
	if (!sp->role.invalid && !sp->role.direct)
2474
		unaccount_shadowed(kvm, sp);
2475

2476 2477
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
2478
	if (!sp->root_count) {
2479 2480
		/* Count self */
		ret++;
2481
		list_move(&sp->link, invalid_list);
2482
		kvm_mod_used_mmu_pages(kvm, -1);
2483
	} else {
A
Avi Kivity 已提交
2484
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
2485 2486 2487 2488 2489 2490 2491

		/*
		 * The obsolete pages can not be used on any vcpus.
		 * See the comments in kvm_mmu_invalidate_zap_all_pages().
		 */
		if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
2492
	}
2493 2494

	sp->role.invalid = 1;
2495
	return ret;
2496 2497
}

2498 2499 2500
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
2501
	struct kvm_mmu_page *sp, *nsp;
2502 2503 2504 2505

	if (list_empty(invalid_list))
		return;

2506
	/*
2507 2508 2509 2510 2511 2512 2513
	 * We need to make sure everyone sees our modifications to
	 * the page tables and sees changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
2514 2515
	 */
	kvm_flush_remote_tlbs(kvm);
2516

2517
	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2518
		WARN_ON(!sp->role.invalid || sp->root_count);
2519
		kvm_mmu_free_page(sp);
2520
	}
2521 2522
}

2523 2524 2525 2526 2527 2528 2529 2530
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
					struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return false;

G
Geliang Tang 已提交
2531 2532
	sp = list_last_entry(&kvm->arch.active_mmu_pages,
			     struct kvm_mmu_page, link);
2533 2534 2535 2536 2537
	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	return true;
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
2542
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2543
{
2544
	LIST_HEAD(invalid_list);
2545

2546 2547
	spin_lock(&kvm->mmu_lock);

2548
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2549 2550 2551 2552
		/* Need to free some mmu pages to achieve the goal. */
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
				break;
2553

2554
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
2555
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2556 2557
	}

2558
	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2559 2560

	spin_unlock(&kvm->mmu_lock);
2561 2562
}

2563
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2564
{
2565
	struct kvm_mmu_page *sp;
2566
	LIST_HEAD(invalid_list);
2567 2568
	int r;

2569
	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2570
	r = 0;
2571
	spin_lock(&kvm->mmu_lock);
2572
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2573
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2574 2575
			 sp->role.word);
		r = 1;
2576
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2577
	}
2578
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2579 2580
	spin_unlock(&kvm->mmu_lock);

2581
	return r;
2582
}
2583
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2584

2585
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2586 2587 2588 2589 2590 2591 2592 2593
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

2594 2595
static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				   bool can_unsync)
2596
{
2597
	struct kvm_mmu_page *sp;
2598

2599 2600
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;
2601

2602
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2603
		if (!can_unsync)
2604
			return true;
2605

2606 2607
		if (sp->unsync)
			continue;
2608

2609 2610
		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unsync_page(vcpu, sp);
2611
	}
2612 2613

	return false;
2614 2615
}

D
Dan Williams 已提交
2616
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2617 2618 2619 2620 2621 2622 2623
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));

	return true;
}

A
Avi Kivity 已提交
2624
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2625
		    unsigned pte_access, int level,
D
Dan Williams 已提交
2626
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2627
		    bool can_unsync, bool host_writable)
2628
{
2629
	u64 spte = 0;
M
Marcelo Tosatti 已提交
2630
	int ret = 0;
S
Sheng Yang 已提交
2631

2632
	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2633 2634
		return 0;

2635 2636 2637 2638 2639 2640
	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
2641
	spte |= shadow_present_mask;
2642
	if (!speculative)
2643
		spte |= shadow_accessed_mask;
2644

S
Sheng Yang 已提交
2645 2646 2647 2648
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
2649

2650
	if (pte_access & ACC_USER_MASK)
S
Sheng Yang 已提交
2651
		spte |= shadow_user_mask;
2652

2653
	if (level > PT_PAGE_TABLE_LEVEL)
M
Marcelo Tosatti 已提交
2654
		spte |= PT_PAGE_SIZE_MASK;
2655
	if (tdp_enabled)
2656
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2657
			kvm_is_mmio_pfn(pfn));
2658

2659
	if (host_writable)
2660
		spte |= SPTE_HOST_WRITEABLE;
2661 2662
	else
		pte_access &= ~ACC_WRITE_MASK;
2663

2664
	spte |= (u64)pfn << PAGE_SHIFT;
2665

2666
	if (pte_access & ACC_WRITE_MASK) {
2667

X
Xiao Guangrong 已提交
2668
		/*
2669 2670 2671 2672
		 * Another vcpu may create a new sp in the window between
		 * mapping_level() and acquiring the mmu-lock. We can allow
		 * the guest to retry the access; the mapping can be fixed
		 * if the guest refaults.
X
Xiao Guangrong 已提交
2673
		 */
2674
		if (level > PT_PAGE_TABLE_LEVEL &&
2675
		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
A
Avi Kivity 已提交
2676
			goto done;
2677

2678
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
2679

2680 2681 2682 2683 2684 2685
		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
2686
		if (!can_unsync && is_writable_pte(*sptep))
2687 2688
			goto set_pte;

2689
		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2690
			pgprintk("%s: found shadow page for %llx, marking ro\n",
2691
				 __func__, gfn);
M
Marcelo Tosatti 已提交
2692
			ret = 1;
2693
			pte_access &= ~ACC_WRITE_MASK;
2694
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
2695 2696 2697
		}
	}

2698
	if (pte_access & ACC_WRITE_MASK) {
2699
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2700 2701
		spte |= shadow_dirty_mask;
	}
2702

2703 2704 2705
	if (speculative)
		spte = mark_spte_for_access_track(spte);

2706
set_pte:
2707
	if (mmu_spte_update(sptep, spte))
2708
		kvm_flush_remote_tlbs(vcpu->kvm);
A
Avi Kivity 已提交
2709
done:
M
Marcelo Tosatti 已提交
2710 2711 2712
	return ret;
}

2713
static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
D
Dan Williams 已提交
2714
			 int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
2715
			 bool speculative, bool host_writable)
M
Marcelo Tosatti 已提交
2716 2717
{
	int was_rmapped = 0;
2718
	int rmap_count;
2719
	bool emulate = false;
M
Marcelo Tosatti 已提交
2720

2721 2722
	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);
M
Marcelo Tosatti 已提交
2723

2724
	if (is_shadow_present_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2725 2726 2727 2728
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
2729 2730
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2731
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
2732
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
2733 2734

			child = page_header(pte & PT64_BASE_ADDR_MASK);
2735
			drop_parent_pte(child, sptep);
2736
			kvm_flush_remote_tlbs(vcpu->kvm);
A
Avi Kivity 已提交
2737
		} else if (pfn != spte_to_pfn(*sptep)) {
2738
			pgprintk("hfn old %llx new %llx\n",
A
Avi Kivity 已提交
2739
				 spte_to_pfn(*sptep), pfn);
2740
			drop_spte(vcpu->kvm, sptep);
2741
			kvm_flush_remote_tlbs(vcpu->kvm);
2742 2743
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
2744
	}
2745

2746 2747
	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
	      true, host_writable)) {
M
Marcelo Tosatti 已提交
2748
		if (write_fault)
2749
			emulate = true;
2750
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2751
	}
M
Marcelo Tosatti 已提交
2752

2753 2754
	if (unlikely(is_mmio_spte(*sptep)))
		emulate = true;
2755

A
Avi Kivity 已提交
2756
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2757
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
A
Avi Kivity 已提交
2758
		 is_large_pte(*sptep)? "2MB" : "4kB",
2759
		 *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
2760
		 *sptep, sptep);
A
Avi Kivity 已提交
2761
	if (!was_rmapped && is_large_pte(*sptep))
M
Marcelo Tosatti 已提交
2762 2763
		++vcpu->kvm->stat.lpages;

2764 2765 2766 2767 2768 2769
	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
2770
	}
2771

X
Xiao Guangrong 已提交
2772
	kvm_release_pfn_clean(pfn);
2773 2774

	return emulate;
2775 2776
}

D
Dan Williams 已提交
2777
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2778 2779 2780 2781
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

2782
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2783
	if (!slot)
2784
		return KVM_PFN_ERR_FAULT;
2785

2786
	return gfn_to_pfn_memslot_atomic(slot, gfn);
2787 2788 2789 2790 2791 2792 2793
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
2794
	struct kvm_memory_slot *slot;
2795 2796 2797 2798 2799
	unsigned access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2800 2801
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
2802 2803
		return -1;

2804
	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2805 2806 2807 2808
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++)
2809 2810
		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2827
		if (is_shadow_present_pte(*spte) || spte == sptep) {
2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	/*
	 * Since there is no accessed bit on EPT, there is no way to
	 * distinguish between actually accessed translations
	 * and prefetched ones, so disable pte prefetch if EPT is
	 * enabled.
	 */
	if (!shadow_accessed_mask)
		return;

	sp = page_header(__pa(sptep));
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

2858
static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
D
Dan Williams 已提交
2859
			int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
2860
{
2861
	struct kvm_shadow_walk_iterator iterator;
2862
	struct kvm_mmu_page *sp;
2863
	int emulate = 0;
2864
	gfn_t pseudo_gfn;
A
Avi Kivity 已提交
2865

2866 2867 2868
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return 0;

2869
	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2870
		if (iterator.level == level) {
2871 2872 2873
			emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
					       write, level, gfn, pfn, prefault,
					       map_writable);
2874
			direct_pte_prefetch(vcpu, iterator.sptep);
2875 2876
			++vcpu->stat.pf_fixed;
			break;
A
Avi Kivity 已提交
2877 2878
		}

2879
		drop_large_spte(vcpu, iterator.sptep);
2880
		if (!is_shadow_present_pte(*iterator.sptep)) {
2881 2882 2883 2884
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
2885
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2886
					      iterator.level - 1, 1, ACC_ALL);
2887

2888
			link_shadow_page(vcpu, iterator.sptep, sp);
2889 2890
		}
	}
2891
	return emulate;
A
Avi Kivity 已提交
2892 2893
}

H
Huang Ying 已提交
2894
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2895
{
H
Huang Ying 已提交
2896 2897 2898 2899 2900 2901 2902
	siginfo_t info;

	info.si_signo	= SIGBUS;
	info.si_errno	= 0;
	info.si_code	= BUS_MCEERR_AR;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = PAGE_SHIFT;
2903

H
Huang Ying 已提交
2904
	send_sig_info(SIGBUS, &info, tsk);
2905 2906
}

D
Dan Williams 已提交
2907
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2908
{
X
Xiao Guangrong 已提交
2909 2910 2911 2912 2913 2914 2915 2916 2917
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte otherwise read access on readonly gfn also can
	 * caused mmio page fault and treat it as mmio access.
	 * Return 1 to tell kvm to emulate it.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return 1;

2918
	if (pfn == KVM_PFN_ERR_HWPOISON) {
2919
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2920
		return 0;
2921
	}
2922

2923
	return -EFAULT;
2924 2925
}

2926
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
D
Dan Williams 已提交
2927 2928
					gfn_t *gfnp, kvm_pfn_t *pfnp,
					int *levelp)
2929
{
D
Dan Williams 已提交
2930
	kvm_pfn_t pfn = *pfnp;
2931 2932 2933 2934 2935 2936 2937 2938 2939
	gfn_t gfn = *gfnp;
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this were a
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
	 * here.
	 */
2940
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
2941
	    level == PT_PAGE_TABLE_LEVEL &&
2942
	    PageTransCompoundMap(pfn_to_page(pfn)) &&
2943
	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961
		unsigned long mask;
		/*
		 * mmu_notifier_retry was successful and we hold the
		 * mmu_lock here, so the pmd can't become splitting
		 * from under us, and in turn
		 * __split_huge_page_refcount() can't run from under
		 * us and we can safely transfer the refcount from
		 * PG_tail to PG_head as we switch the pfn to tail to
		 * head.
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			gfn &= ~mask;
			*gfnp = gfn;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
2962
			kvm_get_pfn(pfn);
2963 2964 2965 2966 2967
			*pfnp = pfn;
		}
	}
}

2968
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
D
Dan Williams 已提交
2969
				kvm_pfn_t pfn, unsigned access, int *ret_val)
2970 2971
{
	/* The pfn is invalid, report the error! */
2972
	if (unlikely(is_error_pfn(pfn))) {
2973
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2974
		return true;
2975 2976
	}

2977
	if (unlikely(is_noslot_pfn(pfn)))
2978 2979
		vcpu_cache_mmio_info(vcpu, gva, gfn, access);

2980
	return false;
2981 2982
}

2983
static bool page_fault_can_be_fast(u32 error_code)
2984
{
2985 2986 2987 2988 2989 2990 2991
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

2992 2993 2994 2995 2996
	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

2997
	/*
2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3009 3010
	 */

3011 3012 3013
	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3014 3015
}

3016 3017 3018 3019
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3020
static bool
3021
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3022 3023
			u64 *sptep, u64 old_spte,
			bool remove_write_prot, bool remove_acc_track)
3024 3025
{
	gfn_t gfn;
3026
	u64 new_spte = old_spte;
3027 3028 3029

	WARN_ON(!sp->role.direct);

3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041
	if (remove_acc_track) {
		u64 saved_bits = (old_spte >> shadow_acc_track_saved_bits_shift)
				 & shadow_acc_track_saved_bits_mask;

		new_spte &= ~shadow_acc_track_mask;
		new_spte &= ~(shadow_acc_track_saved_bits_mask <<
			      shadow_acc_track_saved_bits_shift);
		new_spte |= saved_bits;
	}

	if (remove_write_prot)
		new_spte |= PT_WRITABLE_MASK;
3042

3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3055
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3056 3057
		return false;

3058 3059 3060 3061 3062 3063 3064 3065
	if (remove_write_prot) {
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078

	return true;
}

/*
 * Return value:
 * - true: let the vcpu to access on the same address again.
 * - false: let the real page fault path to fix it.
 */
static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
3079
	struct kvm_mmu_page *sp;
3080
	bool fault_handled = false;
3081
	u64 spte = 0ull;
3082
	uint retry_count = 0;
3083

3084 3085 3086
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return false;

3087
	if (!page_fault_can_be_fast(error_code))
3088 3089 3090 3091 3092 3093 3094
		return false;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
		if (!is_shadow_present_pte(spte) || iterator.level < level)
			break;

3095
	do {
3096 3097
		bool remove_write_prot = false;
		bool remove_acc_track;
3098

3099 3100 3101
		sp = page_header(__pa(iterator.sptep));
		if (!is_last_spte(spte, sp->role.level))
			break;
3102

3103
		/*
3104 3105 3106 3107 3108
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3109 3110 3111 3112
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138

		if (error_code & PFERR_FETCH_MASK) {
			if ((spte & (shadow_x_mask | shadow_nx_mask))
			    == shadow_x_mask) {
				fault_handled = true;
				break;
			}
		} else if (error_code & PFERR_WRITE_MASK) {
			if (is_writable_pte(spte)) {
				fault_handled = true;
				break;
			}

			/*
			 * Currently, to simplify the code, write-protection can
			 * be removed in the fast path only if the SPTE was
			 * write-protected for dirty-logging.
			 */
			remove_write_prot =
				spte_can_locklessly_be_made_writable(spte);
		} else {
			/* Fault was on Read access */
			if (spte & PT_PRESENT_MASK) {
				fault_handled = true;
				break;
			}
3139
		}
3140

3141 3142 3143 3144
		remove_acc_track = is_access_track_spte(spte);

		/* Verify that the fault can be handled in the fast path */
		if (!remove_acc_track && !remove_write_prot)
3145
			break;
3146

3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157
		/*
		 * Do not fix write-permission on the large spte since we only
		 * dirty the first page into the dirty-bitmap in
		 * fast_pf_fix_direct_spte() that means other pages are missed
		 * if its slot is dirty-logged.
		 *
		 * Instead, we let the slow page fault path create a normal spte
		 * to fix the access.
		 *
		 * See the comments in kvm_arch_commit_memory_region().
		 */
3158
		if (sp->role.level > PT_PAGE_TABLE_LEVEL && remove_write_prot)
3159 3160 3161 3162 3163 3164 3165 3166
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virtual/kvm/locking.txt to get more detail.
		 */
		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3167 3168 3169
							iterator.sptep, spte,
							remove_write_prot,
							remove_acc_track);
3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181
		if (fault_handled)
			break;

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

		spte = mmu_spte_get_lockless(iterator.sptep);

	} while (true);
3182

X
Xiao Guangrong 已提交
3183
	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
3184
			      spte, fault_handled);
3185 3186
	walk_shadow_page_lockless_end(vcpu);

3187
	return fault_handled;
3188 3189
}

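/*
 * The fast #PF path above never takes mmu_lock: it re-reads the spte
 * locklessly, fixes it with the cmpxchg in fast_pf_fix_direct_spte() and
 * retries a bounded number of times (see the retry_count check) before
 * falling back to the regular page fault path.
 */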
3190
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
D
Dan Williams 已提交
3191
			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
3192
static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
3193

3194 3195
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
			 gfn_t gfn, bool prefault)
3196 3197
{
	int r;
3198
	int level;
3199
	bool force_pt_level = false;
D
Dan Williams 已提交
3200
	kvm_pfn_t pfn;
3201
	unsigned long mmu_seq;
3202
	bool map_writable, write = error_code & PFERR_WRITE_MASK;
3203

3204
	level = mapping_level(vcpu, gfn, &force_pt_level);
3205 3206 3207 3208 3209 3210 3211 3212
	if (likely(!force_pt_level)) {
		/*
		 * This path builds a PAE pagetable - so we can map
		 * 2mb pages at maximum. Therefore check if the level
		 * is larger than that.
		 */
		if (level > PT_DIRECTORY_LEVEL)
			level = PT_DIRECTORY_LEVEL;
3213

3214
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3215
	}
M
Marcelo Tosatti 已提交
3216

3217 3218 3219
	if (fast_page_fault(vcpu, v, level, error_code))
		return 0;

3220
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3221
	smp_rmb();
3222

3223
	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
3224
		return 0;
3225

3226 3227
	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
		return r;
3228

3229
	spin_lock(&vcpu->kvm->mmu_lock);
3230
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3231
		goto out_unlock;
3232
	make_mmu_pages_available(vcpu);
3233 3234
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3235
	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
3236 3237
	spin_unlock(&vcpu->kvm->mmu_lock);

3238
	return r;
3239 3240 3241 3242 3243

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
3244 3245 3246
}


3247 3248 3249
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
3250
	struct kvm_mmu_page *sp;
3251
	LIST_HEAD(invalid_list);
3252

3253
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
A
Avi Kivity 已提交
3254
		return;
3255

3256 3257 3258
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
	     vcpu->arch.mmu.direct_map)) {
3259
		hpa_t root = vcpu->arch.mmu.root_hpa;
3260

3261
		spin_lock(&vcpu->kvm->mmu_lock);
3262 3263
		sp = page_header(root);
		--sp->root_count;
3264 3265 3266 3267
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
3268
		spin_unlock(&vcpu->kvm->mmu_lock);
3269
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3270 3271
		return;
	}
3272 3273

	spin_lock(&vcpu->kvm->mmu_lock);
3274
	for (i = 0; i < 4; ++i) {
3275
		hpa_t root = vcpu->arch.mmu.pae_root[i];
3276

A
Avi Kivity 已提交
3277 3278
		if (root) {
			root &= PT64_BASE_ADDR_MASK;
3279 3280
			sp = page_header(root);
			--sp->root_count;
3281
			if (!sp->root_count && sp->role.invalid)
3282 3283
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
A
Avi Kivity 已提交
3284
		}
3285
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3286
	}
3287
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3288
	spin_unlock(&vcpu->kvm->mmu_lock);
3289
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3290 3291
}

3292 3293 3294 3295 3296
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3297
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3298 3299 3300 3301 3302 3303
		ret = 1;
	}

	return ret;
}

3304 3305 3306
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
3307
	unsigned i;
3308 3309 3310

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
3311
		make_mmu_pages_available(vcpu);
3312
		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
3313 3314 3315 3316 3317 3318 3319
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

3320
			MMU_WARN_ON(VALID_PAGE(root));
3321
			spin_lock(&vcpu->kvm->mmu_lock);
3322
			make_mmu_pages_available(vcpu);
3323
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3324
					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
3325 3326 3327 3328 3329
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
3330
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3331 3332 3333 3334 3335 3336 3337
	} else
		BUG();

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3338
{
3339
	struct kvm_mmu_page *sp;
3340 3341 3342
	u64 pdptr, pm_mask;
	gfn_t root_gfn;
	int i;
3343

3344
	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
3345

3346 3347 3348 3349 3350 3351 3352 3353
	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3354
		hpa_t root = vcpu->arch.mmu.root_hpa;
3355

3356
		MMU_WARN_ON(VALID_PAGE(root));
3357

3358
		spin_lock(&vcpu->kvm->mmu_lock);
3359
		make_mmu_pages_available(vcpu);
3360
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
3361
				      0, ACC_ALL);
3362 3363
		root = __pa(sp->spt);
		++sp->root_count;
3364
		spin_unlock(&vcpu->kvm->mmu_lock);
3365
		vcpu->arch.mmu.root_hpa = root;
3366
		return 0;
3367
	}
3368

3369 3370
	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
3371 3372
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
3373
	 */
3374 3375 3376 3377
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

3378
	for (i = 0; i < 4; ++i) {
3379
		hpa_t root = vcpu->arch.mmu.pae_root[i];
3380

3381
		MMU_WARN_ON(VALID_PAGE(root));
3382
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
3383
			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
B
Bandan Das 已提交
3384
			if (!(pdptr & PT_PRESENT_MASK)) {
3385
				vcpu->arch.mmu.pae_root[i] = 0;
A
Avi Kivity 已提交
3386 3387
				continue;
			}
A
Avi Kivity 已提交
3388
			root_gfn = pdptr >> PAGE_SHIFT;
3389 3390
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
3391
		}
3392
		spin_lock(&vcpu->kvm->mmu_lock);
3393
		make_mmu_pages_available(vcpu);
3394 3395
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
				      0, ACC_ALL);
3396 3397
		root = __pa(sp->spt);
		++sp->root_count;
3398 3399
		spin_unlock(&vcpu->kvm->mmu_lock);

3400
		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
3401
	}
3402
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		if (vcpu->arch.mmu.lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */

			u64 *lm_root;

			lm_root = (void*)get_zeroed_page(GFP_KERNEL);
			if (lm_root == NULL)
				return 1;

			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;

			vcpu->arch.mmu.lm_root = lm_root;
		}

		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
	}

3429
	return 0;
3430 3431
}

3432 3433 3434 3435 3436 3437 3438 3439
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu.direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}

3440 3441 3442 3443 3444
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

3445 3446 3447
	if (vcpu->arch.mmu.direct_map)
		return;

3448 3449
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
3450

3451
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3452
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3453
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3454 3455 3456
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
3457
		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3458 3459 3460 3461 3462
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

3463
		if (root && VALID_PAGE(root)) {
3464 3465 3466 3467 3468
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
3469
	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3470 3471 3472 3473 3474 3475
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
3476
	spin_unlock(&vcpu->kvm->mmu_lock);
3477
}
N
Nadav Har'El 已提交
3478
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3479

3480
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3481
				  u32 access, struct x86_exception *exception)
A
Avi Kivity 已提交
3482
{
3483 3484
	if (exception)
		exception->error_code = 0;
A
Avi Kivity 已提交
3485 3486 3487
	return vaddr;
}

3488
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
3489 3490
					 u32 access,
					 struct x86_exception *exception)
3491
{
3492 3493
	if (exception)
		exception->error_code = 0;
3494
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3495 3496
}

3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515
static bool
__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
{
	int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;

	return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
		((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
}

static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
}

static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
{
	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
}

3516
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3517 3518 3519 3520 3521 3522 3523
{
	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

3524 3525 3526
/* return true if reserved bit is detected on spte. */
static bool
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3527 3528
{
	struct kvm_shadow_walk_iterator iterator;
3529 3530 3531
	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
	int root, leaf;
	bool reserved = false;
3532

3533
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3534
		goto exit;
3535

3536
	walk_shadow_page_lockless_begin(vcpu);
3537

3538 3539
	for (shadow_walk_init(&iterator, vcpu, addr),
		 leaf = root = iterator.level;
3540 3541 3542 3543 3544
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf - 1] = spte;
3545
		leaf--;
3546

3547 3548
		if (!is_shadow_present_pte(spte))
			break;
3549 3550

		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
3551
						    iterator.level);
3552 3553
	}

3554 3555
	walk_shadow_page_lockless_end(vcpu);

3556 3557 3558
	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
3559
		while (root > leaf) {
3560 3561 3562 3563 3564 3565 3566 3567
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[root - 1], root);
			root--;
		}
	}
exit:
	*sptep = spte;
	return reserved;
3568 3569
}

3570
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3571 3572
{
	u64 spte;
3573
	bool reserved;
3574

3575
	if (mmio_info_in_cache(vcpu, addr, direct))
3576
		return RET_MMIO_PF_EMULATE;
3577

3578
	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3579
	if (WARN_ON(reserved))
3580
		return RET_MMIO_PF_BUG;
3581 3582 3583 3584 3585

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned access = get_mmio_spte_access(spte);

3586
		if (!check_mmio_spte(vcpu, spte))
3587 3588
			return RET_MMIO_PF_INVALID;

3589 3590
		if (direct)
			addr = 0;
X
Xiao Guangrong 已提交
3591 3592

		trace_handle_mmio_page_fault(addr, gfn, access);
3593
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3594
		return RET_MMIO_PF_EMULATE;
3595 3596 3597 3598 3599 3600
	}

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
3601
	return RET_MMIO_PF_RETRY;
3602
}
3603
EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
3604

3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * guest is writing the page which is write tracked which can
	 * not be fixed by page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

A
Avi Kivity 已提交
3642
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3643
				u32 error_code, bool prefault)
A
Avi Kivity 已提交
3644
{
3645
	gfn_t gfn = gva >> PAGE_SHIFT;
3646
	int r;
A
Avi Kivity 已提交
3647

3648
	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3649

3650 3651
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return 1;
3652

3653 3654 3655
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
3656

3657
	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
A
Avi Kivity 已提交
3658 3659


3660
	return nonpaging_map(vcpu, gva & PAGE_MASK,
3661
			     error_code, gfn, prefault);
A
Avi Kivity 已提交
3662 3663
}

3664
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3665 3666
{
	struct kvm_arch_async_pf arch;
X
Xiao Guangrong 已提交
3667

3668
	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3669
	arch.gfn = gfn;
3670
	arch.direct_map = vcpu->arch.mmu.direct_map;
X
Xiao Guangrong 已提交
3671
	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
3672

3673
	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3674 3675 3676 3677
}

static bool can_do_async_pf(struct kvm_vcpu *vcpu)
{
3678
	if (unlikely(!lapic_in_kernel(vcpu) ||
3679 3680 3681 3682 3683 3684
		     kvm_event_needs_reinjection(vcpu)))
		return false;

	return kvm_x86_ops->interrupt_allowed(vcpu);
}

3685
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
D
Dan Williams 已提交
3686
			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
3687
{
3688
	struct kvm_memory_slot *slot;
3689 3690
	bool async;

3691
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3692 3693
	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
3694 3695 3696
	if (!async)
		return false; /* *pfn has correct page already */

3697
	if (!prefault && can_do_async_pf(vcpu)) {
3698
		trace_kvm_try_async_get_page(gva, gfn);
3699 3700 3701 3702 3703 3704 3705 3706
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(gva, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
			return true;
	}

3707
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
3708 3709 3710
	return false;
}

3711 3712 3713 3714 3715 3716 3717 3718 3719 3720
static bool
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
{
	int page_num = KVM_PAGES_PER_HPAGE(level);

	gfn &= ~(page_num - 1);

	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
}

G
Gleb Natapov 已提交
3721
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
3722
			  bool prefault)
3723
{
D
Dan Williams 已提交
3724
	kvm_pfn_t pfn;
3725
	int r;
3726
	int level;
3727
	bool force_pt_level;
M
Marcelo Tosatti 已提交
3728
	gfn_t gfn = gpa >> PAGE_SHIFT;
3729
	unsigned long mmu_seq;
3730 3731
	int write = error_code & PFERR_WRITE_MASK;
	bool map_writable;
3732

3733
	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3734

3735 3736
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return 1;
3737

3738 3739 3740 3741
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

3742 3743 3744
	force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
							   PT_DIRECTORY_LEVEL);
	level = mapping_level(vcpu, gfn, &force_pt_level);
3745
	if (likely(!force_pt_level)) {
3746 3747 3748
		if (level > PT_DIRECTORY_LEVEL &&
		    !check_hugepage_cache_consistency(vcpu, gfn, level))
			level = PT_DIRECTORY_LEVEL;
3749
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3750
	}
3751

3752 3753 3754
	if (fast_page_fault(vcpu, gpa, level, error_code))
		return 0;

3755
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3756
	smp_rmb();
3757

3758
	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
3759 3760
		return 0;

3761 3762 3763
	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
		return r;

3764
	spin_lock(&vcpu->kvm->mmu_lock);
3765
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3766
		goto out_unlock;
3767
	make_mmu_pages_available(vcpu);
3768 3769
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3770
	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
3771 3772 3773
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;
3774 3775 3776 3777 3778

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
3779 3780
}

3781 3782
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
A
Avi Kivity 已提交
3783 3784 3785
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
3786
	context->sync_page = nonpaging_sync_page;
M
Marcelo Tosatti 已提交
3787
	context->invlpg = nonpaging_invlpg;
3788
	context->update_pte = nonpaging_update_pte;
3789
	context->root_level = 0;
A
Avi Kivity 已提交
3790
	context->shadow_root_level = PT32E_ROOT_LEVEL;
A
Avi Kivity 已提交
3791
	context->root_hpa = INVALID_PAGE;
3792
	context->direct_map = true;
3793
	context->nx = false;
A
Avi Kivity 已提交
3794 3795
}

3796
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
3797
{
3798
	mmu_free_roots(vcpu);
A
Avi Kivity 已提交
3799 3800
}

3801 3802
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
3803
	return kvm_read_cr3(vcpu);
3804 3805
}

3806 3807
static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
A
Avi Kivity 已提交
3808
{
3809
	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
A
Avi Kivity 已提交
3810 3811
}

3812
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
3813
			   unsigned access, int *nr_present)
3814 3815 3816 3817 3818 3819 3820 3821
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
3822
		mark_mmio_spte(vcpu, sptep, gfn, access);
3823 3824 3825 3826 3827 3828
		return true;
	}

	return false;
}

3829 3830
static inline bool is_last_gpte(struct kvm_mmu *mmu,
				unsigned level, unsigned gpte)
A
Avi Kivity 已提交
3831
{
3832 3833 3834 3835 3836 3837
	/*
	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
A
Avi Kivity 已提交
3838

3839 3840 3841 3842 3843 3844 3845 3846
	/*
	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
	 * If it is clear, there are no large pages at this level, so clear
	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - mmu->last_nonleaf_level;

	return gpte & PT_PAGE_SIZE_MASK;
A
Avi Kivity 已提交
3847 3848
}

3849 3850 3851 3852 3853
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

A
Avi Kivity 已提交
3854 3855 3856 3857 3858 3859 3860 3861
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

3862 3863 3864 3865
static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			struct rsvd_bits_validate *rsvd_check,
			int maxphyaddr, int level, bool nx, bool gbpages,
3866
			bool pse, bool amd)
3867 3868
{
	u64 exb_bit_rsvd = 0;
3869
	u64 gbpages_bit_rsvd = 0;
3870
	u64 nonleaf_bit8_rsvd = 0;
3871

3872
	rsvd_check->bad_mt_xwr = 0;
3873

3874
	if (!nx)
3875
		exb_bit_rsvd = rsvd_bits(63, 63);
3876
	if (!gbpages)
3877
		gbpages_bit_rsvd = rsvd_bits(7, 7);
3878 3879 3880 3881 3882

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
3883
	if (amd)
3884 3885
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

3886
	switch (level) {
3887 3888
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
3889 3890 3891 3892
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
3893

3894
		if (!pse) {
3895
			rsvd_check->rsvd_bits_mask[1][1] = 0;
3896 3897 3898
			break;
		}

3899 3900
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
3901
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
3902 3903
		else
			/* 32 bits PSE 4MB page */
3904
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
3905 3906
		break;
	case PT32E_ROOT_LEVEL:
3907
		rsvd_check->rsvd_bits_mask[0][2] =
3908
			rsvd_bits(maxphyaddr, 63) |
3909
			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
3910
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
3911
			rsvd_bits(maxphyaddr, 62);	/* PDE */
3912
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
3913
			rsvd_bits(maxphyaddr, 62); 	/* PTE */
3914
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3915 3916
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
3917 3918
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
3919 3920
		break;
	case PT64_ROOT_LEVEL:
3921 3922
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
3923
			rsvd_bits(maxphyaddr, 51);
3924 3925
		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
3926
			rsvd_bits(maxphyaddr, 51);
3927 3928 3929 3930 3931 3932 3933
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
3934
			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
3935
			rsvd_bits(13, 29);
3936
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3937 3938
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
3939 3940
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
3941 3942 3943 3944
		break;
	}
}

3945 3946 3947 3948 3949 3950
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
				cpuid_maxphyaddr(vcpu), context->root_level,
				context->nx, guest_cpuid_has_gbpages(vcpu),
3951
				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
3952 3953
}

3954 3955 3956
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    int maxphyaddr, bool execonly)
3957
{
3958
	u64 bad_mt_xwr;
3959

3960
	rsvd_check->rsvd_bits_mask[0][3] =
3961
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
3962
	rsvd_check->rsvd_bits_mask[0][2] =
3963
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3964
	rsvd_check->rsvd_bits_mask[0][1] =
3965
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3966
	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
3967 3968

	/* large page */
3969 3970
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] =
3971
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
3972
	rsvd_check->rsvd_bits_mask[1][1] =
3973
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
3974
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
3975

3976 3977 3978 3979 3980 3981 3982 3983
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
3984
	}
3985
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
3986 3987
}

3988 3989 3990 3991 3992 3993 3994
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    cpuid_maxphyaddr(vcpu), execonly);
}

3995 3996 3997 3998 3999 4000 4001 4002
/*
 * the page table on host is the shadow page table for the page
 * table in guest or amd nested guest, its mmu features completely
 * follow the features in guest.
 */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
4003 4004
	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;

4005 4006 4007 4008
	/*
	 * Passing "true" to the last argument is okay; it adds a check
	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
	 */
4009 4010
	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
				boot_cpu_data.x86_phys_bits,
4011
				context->shadow_root_level, uses_nx,
4012 4013
				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
				true);
4014 4015 4016
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

4017 4018 4019 4020 4021 4022
static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

4023 4024 4025 4026 4027 4028 4029 4030
/*
 * the direct page table on host, use as much mmu features as
 * possible, however, kvm currently does not do execution-protection.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
4031
	if (boot_cpu_is_amd())
4032 4033 4034
		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
					boot_cpu_data.x86_phys_bits,
					context->shadow_root_level, false,
4035 4036
					boot_cpu_has(X86_FEATURE_GBPAGES),
					true, true);
4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055
	else
		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
					    boot_cpu_data.x86_phys_bits,
					    false);

}

/*
 * as the comments in reset_shadow_zero_bits_mask() except it
 * is the shadow page table for intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
				    boot_cpu_data.x86_phys_bits, execonly);
}

4056 4057
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu, bool ept)
4058 4059 4060
{
	unsigned bit, byte, pfec;
	u8 map;
F
Feng Wu 已提交
4061
	bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
4062

F
Feng Wu 已提交
4063
	cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
F
Feng Wu 已提交
4064
	cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4065 4066 4067 4068 4069 4070
	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
		pfec = byte << 1;
		map = 0;
		wf = pfec & PFERR_WRITE_MASK;
		uf = pfec & PFERR_USER_MASK;
		ff = pfec & PFERR_FETCH_MASK;
F
Feng Wu 已提交
4071 4072 4073 4074 4075 4076
		/*
		 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
		 * subject to SMAP restrictions, and cleared otherwise. The
		 * bit is only meaningful if the SMAP bit is set in CR4.
		 */
		smapf = !(pfec & PFERR_RSVD_MASK);
4077 4078 4079 4080 4081
		for (bit = 0; bit < 8; ++bit) {
			x = bit & ACC_EXEC_MASK;
			w = bit & ACC_WRITE_MASK;
			u = bit & ACC_USER_MASK;

4082 4083 4084 4085 4086 4087
			if (!ept) {
				/* Not really needed: !nx will cause pte.nx to fault */
				x |= !mmu->nx;
				/* Allow supervisor writes if !cr0.wp */
				w |= !is_write_protection(vcpu) && !uf;
				/* Disallow supervisor fetches of user code if cr4.smep */
F
Feng Wu 已提交
4088
				x &= !(cr4_smep && u && !uf);
F
Feng Wu 已提交
4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108

				/*
				 * SMAP:kernel-mode data accesses from user-mode
				 * mappings should fault. A fault is considered
				 * as a SMAP violation if all of the following
				 * conditions are ture:
				 *   - X86_CR4_SMAP is set in CR4
				 *   - An user page is accessed
				 *   - Page fault in kernel mode
				 *   - if CPL = 3 or X86_EFLAGS_AC is clear
				 *
				 *   Here, we cover the first three conditions.
				 *   The fourth is computed dynamically in
				 *   permission_fault() and is in smapf.
				 *
				 *   Also, SMAP does not affect instruction
				 *   fetches, add the !ff check here to make it
				 *   clearer.
				 */
				smap = cr4_smap && u && !uf && !ff;
4109
			}
4110

F
Feng Wu 已提交
4111 4112
			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
				(smapf && smap);
4113 4114 4115 4116 4117 4118
			map |= fault << bit;
		}
		mmu->permissions[byte] = map;
	}
}

4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193
/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				bool ept)
{
	unsigned bit;
	bool wp;

	if (ept) {
		mmu->pkru_mask = 0;
		return;
	}

	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_write_protection(vcpu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

4194
static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
A
Avi Kivity 已提交
4195
{
4196 4197 4198 4199 4200
	unsigned root_level = mmu->root_level;

	mmu->last_nonleaf_level = root_level;
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
A
Avi Kivity 已提交
4201 4202
}

4203 4204 4205
static void paging64_init_context_common(struct kvm_vcpu *vcpu,
					 struct kvm_mmu *context,
					 int level)
A
Avi Kivity 已提交
4206
{
4207
	context->nx = is_nx(vcpu);
4208
	context->root_level = level;
4209

4210
	reset_rsvds_bits_mask(vcpu, context);
4211
	update_permission_bitmask(vcpu, context, false);
4212
	update_pkru_bitmask(vcpu, context, false);
4213
	update_last_nonleaf_level(vcpu, context);
A
Avi Kivity 已提交
4214

4215
	MMU_WARN_ON(!is_pae(vcpu));
A
Avi Kivity 已提交
4216 4217
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
4218
	context->sync_page = paging64_sync_page;
M
Marcelo Tosatti 已提交
4219
	context->invlpg = paging64_invlpg;
4220
	context->update_pte = paging64_update_pte;
4221
	context->shadow_root_level = level;
A
Avi Kivity 已提交
4222
	context->root_hpa = INVALID_PAGE;
4223
	context->direct_map = false;
A
Avi Kivity 已提交
4224 4225
}

4226 4227
static void paging64_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
4228
{
4229
	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
4230 4231
}

4232 4233
static void paging32_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
A
Avi Kivity 已提交
4234
{
4235
	context->nx = false;
4236
	context->root_level = PT32_ROOT_LEVEL;
4237

4238
	reset_rsvds_bits_mask(vcpu, context);
4239
	update_permission_bitmask(vcpu, context, false);
4240
	update_pkru_bitmask(vcpu, context, false);
4241
	update_last_nonleaf_level(vcpu, context);
A
Avi Kivity 已提交
4242 4243 4244

	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
4245
	context->sync_page = paging32_sync_page;
M
Marcelo Tosatti 已提交
4246
	context->invlpg = paging32_invlpg;
4247
	context->update_pte = paging32_update_pte;
A
Avi Kivity 已提交
4248
	context->shadow_root_level = PT32E_ROOT_LEVEL;
A
Avi Kivity 已提交
4249
	context->root_hpa = INVALID_PAGE;
4250
	context->direct_map = false;
A
Avi Kivity 已提交
4251 4252
}

4253 4254
static void paging32E_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
A
Avi Kivity 已提交
4255
{
4256
	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
A
Avi Kivity 已提交
4257 4258
}

4259
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4260
{
4261
	struct kvm_mmu *context = &vcpu->arch.mmu;
4262

4263
	context->base_role.word = 0;
4264
	context->base_role.smm = is_smm(vcpu);
4265
	context->page_fault = tdp_page_fault;
4266
	context->sync_page = nonpaging_sync_page;
M
Marcelo Tosatti 已提交
4267
	context->invlpg = nonpaging_invlpg;
4268
	context->update_pte = nonpaging_update_pte;
4269
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
4270
	context->root_hpa = INVALID_PAGE;
4271
	context->direct_map = true;
4272
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
4273
	context->get_cr3 = get_cr3;
4274
	context->get_pdptr = kvm_pdptr_read;
4275
	context->inject_page_fault = kvm_inject_page_fault;
4276 4277

	if (!is_paging(vcpu)) {
4278
		context->nx = false;
4279 4280 4281
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
4282
		context->nx = is_nx(vcpu);
4283
		context->root_level = PT64_ROOT_LEVEL;
4284 4285
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4286
	} else if (is_pae(vcpu)) {
4287
		context->nx = is_nx(vcpu);
4288
		context->root_level = PT32E_ROOT_LEVEL;
4289 4290
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4291
	} else {
4292
		context->nx = false;
4293
		context->root_level = PT32_ROOT_LEVEL;
4294 4295
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
4296 4297
	}

4298
	update_permission_bitmask(vcpu, context, false);
4299
	update_pkru_bitmask(vcpu, context, false);
4300
	update_last_nonleaf_level(vcpu, context);
4301
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4302 4303
}

4304
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4305
{
4306
	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4307
	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4308 4309
	struct kvm_mmu *context = &vcpu->arch.mmu;

4310
	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
A
Avi Kivity 已提交
4311 4312

	if (!is_paging(vcpu))
4313
		nonpaging_init_context(vcpu, context);
A
Avi Kivity 已提交
4314
	else if (is_long_mode(vcpu))
4315
		paging64_init_context(vcpu, context);
A
Avi Kivity 已提交
4316
	else if (is_pae(vcpu))
4317
		paging32E_init_context(vcpu, context);
A
Avi Kivity 已提交
4318
	else
4319
		paging32_init_context(vcpu, context);
4320

4321 4322 4323 4324
	context->base_role.nxe = is_nx(vcpu);
	context->base_role.cr4_pae = !!is_pae(vcpu);
	context->base_role.cr0_wp  = is_write_protection(vcpu);
	context->base_role.smep_andnot_wp
4325
		= smep && !is_write_protection(vcpu);
4326 4327
	context->base_role.smap_andnot_wp
		= smap && !is_write_protection(vcpu);
4328
	context->base_role.smm = is_smm(vcpu);
4329
	reset_shadow_zero_bits_mask(vcpu, context);
4330 4331 4332
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

4333
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
N
Nadav Har'El 已提交
4334
{
4335 4336
	struct kvm_mmu *context = &vcpu->arch.mmu;

4337
	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
N
Nadav Har'El 已提交
4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351

	context->shadow_root_level = kvm_x86_ops->get_tdp_level();

	context->nx = true;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->update_pte = ept_update_pte;
	context->root_level = context->shadow_root_level;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;

	update_permission_bitmask(vcpu, context, true);
4352
	update_pkru_bitmask(vcpu, context, true);
N
Nadav Har'El 已提交
4353
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4354
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
N
Nadav Har'El 已提交
4355 4356 4357
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

4358
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4359
{
4360 4361 4362 4363 4364 4365 4366
	struct kvm_mmu *context = &vcpu->arch.mmu;

	kvm_init_shadow_mmu(vcpu);
	context->set_cr3           = kvm_x86_ops->set_cr3;
	context->get_cr3           = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
A
Avi Kivity 已提交
4367 4368
}

4369
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4370 4371 4372 4373
{
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	g_context->get_cr3           = get_cr3;
4374
	g_context->get_pdptr         = kvm_pdptr_read;
4375 4376 4377
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
4378 4379 4380 4381 4382 4383
	 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4384 4385
	 */
	if (!is_paging(vcpu)) {
4386
		g_context->nx = false;
4387 4388 4389
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
4390
		g_context->nx = is_nx(vcpu);
4391
		g_context->root_level = PT64_ROOT_LEVEL;
4392
		reset_rsvds_bits_mask(vcpu, g_context);
4393 4394
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
4395
		g_context->nx = is_nx(vcpu);
4396
		g_context->root_level = PT32E_ROOT_LEVEL;
4397
		reset_rsvds_bits_mask(vcpu, g_context);
4398 4399
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
4400
		g_context->nx = false;
4401
		g_context->root_level = PT32_ROOT_LEVEL;
4402
		reset_rsvds_bits_mask(vcpu, g_context);
4403 4404 4405
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

4406
	update_permission_bitmask(vcpu, g_context, false);
4407
	update_pkru_bitmask(vcpu, g_context, false);
4408
	update_last_nonleaf_level(vcpu, g_context);
4409 4410
}

4411
static void init_kvm_mmu(struct kvm_vcpu *vcpu)
4412
{
4413
	if (mmu_is_nested(vcpu))
4414
		init_kvm_nested_mmu(vcpu);
4415
	else if (tdp_enabled)
4416
		init_kvm_tdp_mmu(vcpu);
4417
	else
4418
		init_kvm_softmmu(vcpu);
4419 4420
}

4421
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4422
{
4423
	kvm_mmu_unload(vcpu);
4424
	init_kvm_mmu(vcpu);
A
Avi Kivity 已提交
4425
}
4426
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
A
Avi Kivity 已提交
4427 4428

int kvm_mmu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4429
{
4430 4431
	int r;

4432
	r = mmu_topup_memory_caches(vcpu);
A
Avi Kivity 已提交
4433 4434
	if (r)
		goto out;
4435
	r = mmu_alloc_roots(vcpu);
4436
	kvm_mmu_sync_roots(vcpu);
4437 4438
	if (r)
		goto out;
4439
	/* set_cr3() should ensure TLB has been flushed */
4440
	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
4441 4442
out:
	return r;
A
Avi Kivity 已提交
4443
}
A
Avi Kivity 已提交
4444 4445 4446 4447 4448
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
4449
	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
A
Avi Kivity 已提交
4450
}
4451
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
A
Avi Kivity 已提交
4452

4453
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4454 4455
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
4456
{
4457
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
4458 4459
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
4460
        }
4461

A
Avi Kivity 已提交
4462
	++vcpu->kvm->stat.mmu_pte_updated;
4463
	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
4464 4465
}

4466 4467 4468 4469 4470 4471 4472 4473
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
4474 4475
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
4476 4477 4478
	return (old & ~new & PT64_PERM_MASK) != 0;
}

4479 4480
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    const u8 *new, int *bytes)
4481
{
4482 4483
	u64 gentry;
	int r;
4484 4485 4486

	/*
	 * Assume that the pte write on a page table of the same type
4487 4488
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
4489
	 */
4490
	if (is_pae(vcpu) && *bytes == 4) {
4491
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4492 4493
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
4494
		r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
4495 4496
		if (r)
			gentry = 0;
4497 4498 4499
		new = (const u8 *)&gentry;
	}

4500
	switch (*bytes) {
4501 4502 4503 4504 4505 4506 4507 4508 4509
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
4510 4511
	}

4512 4513 4514 4515 4516 4517 4518
	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
4519
static bool detect_write_flooding(struct kvm_mmu_page *sp)
4520
{
4521 4522 4523 4524
	/*
	 * Skip write-flooding detected for the sp whose level is 1, because
	 * it can become unsync, then the guest page is not write-protected.
	 */
4525
	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
4526
		return false;
4527

4528 4529
	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.cr4_pae ? 8 : 4;
4546 4547 4548 4549 4550 4551 4552 4553

	/*
	 * Sometimes, the OS only writes the last one bytes to update status
	 * bits, for example, in linux, andb instruction is used in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590
	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.cr4_pae) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

4591
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4592 4593
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
4594 4595 4596 4597 4598 4599
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
4600
	bool remote_flush, local_flush;
4601 4602 4603 4604 4605 4606 4607
	union kvm_mmu_page_role mask = { };

	mask.cr0_wp = 1;
	mask.cr4_pae = 1;
	mask.nxe = 1;
	mask.smep_andnot_wp = 1;
	mask.smap_andnot_wp = 1;
4608
	mask.smm = 1;
4609 4610 4611 4612 4613 4614 4615 4616

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

4617
	remote_flush = local_flush = false;
4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

	/*
	 * No need to care whether allocation memory is successful
	 * or not since pte prefetch is skiped if it does not have
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	++vcpu->kvm->stat.mmu_pte_write;
4632
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
4633

4634
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
4635
		if (detect_write_misaligned(sp, gpa, bytes) ||
4636
		      detect_write_flooding(sp)) {
4637
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
A
Avi Kivity 已提交
4638
			++vcpu->kvm->stat.mmu_flooded;
4639 4640
			continue;
		}
4641 4642 4643 4644 4645

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

4646
		local_flush = true;
4647
		while (npte--) {
4648
			entry = *spte;
4649
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
4650 4651
			if (gentry &&
			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
4652
			      & mask.word) && rmap_can_add(vcpu))
4653
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
G
Gleb Natapov 已提交
4654
			if (need_remote_flush(entry, *spte))
4655
				remote_flush = true;
4656
			++spte;
4657 4658
		}
	}
4659
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
4660
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
4661
	spin_unlock(&vcpu->kvm->mmu_lock);
4662 4663
}

4664 4665
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
4666 4667
	gpa_t gpa;
	int r;
4668

4669
	if (vcpu->arch.mmu.direct_map)
4670 4671
		return 0;

4672
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
4673 4674

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4675

4676
	return r;
4677
}
4678
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
4679

4680
static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4681
{
4682
	LIST_HEAD(invalid_list);
4683

4684 4685 4686
	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return;

4687 4688 4689
	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;
A
Avi Kivity 已提交
4690

A
Avi Kivity 已提交
4691
		++vcpu->kvm->stat.mmu_recycled;
A
Avi Kivity 已提交
4692
	}
4693
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
A
Avi Kivity 已提交
4694 4695
}

4696
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
4697
		       void *insn, int insn_len)
4698
{
4699
	int r, emulation_type = EMULTYPE_RETRY;
4700
	enum emulation_result er;
4701
	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
4702

4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2, direct);
		if (r == RET_MMIO_PF_EMULATE) {
			emulation_type = 0;
			goto emulate;
		}
		if (r == RET_MMIO_PF_RETRY)
			return 1;
		if (r < 0)
			return r;
	}
4714

4715 4716
	r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
				      false);
4717
	if (r < 0)
4718 4719 4720
		return r;
	if (!r)
		return 1;
4721

4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736
	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 *
	 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
	 *       in PFERR_NEXT_GUEST_PAGE)
	 */
	if (error_code == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
		return 1;
	}

4737
	if (mmio_info_in_cache(vcpu, cr2, direct))
4738
		emulation_type = 0;
4739
emulate:
4740
	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
4741 4742 4743 4744

	switch (er) {
	case EMULATE_DONE:
		return 1;
P
Paolo Bonzini 已提交
4745
	case EMULATE_USER_EXIT:
4746
		++vcpu->stat.mmio_exits;
4747
		/* fall through */
4748
	case EMULATE_FAIL:
4749
		return 0;
4750 4751 4752 4753 4754 4755
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

M
Marcelo Tosatti 已提交
4756 4757 4758
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
4759
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
M
Marcelo Tosatti 已提交
4760 4761 4762 4763
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

4764 4765 4766 4767 4768 4769
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

4770 4771 4772 4773 4774 4775
void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

A
Avi Kivity 已提交
4776 4777
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
4778
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
4779 4780
	if (vcpu->arch.mmu.lm_root != NULL)
		free_page((unsigned long)vcpu->arch.mmu.lm_root);
A
Avi Kivity 已提交
4781 4782 4783 4784
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
4785
	struct page *page;
A
Avi Kivity 已提交
4786 4787
	int i;

4788 4789 4790 4791 4792 4793 4794
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
4795 4796
		return -ENOMEM;

4797
	vcpu->arch.mmu.pae_root = page_address(page);
4798
	for (i = 0; i < 4; ++i)
4799
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
4800

A
Avi Kivity 已提交
4801 4802 4803
	return 0;
}

4804
int kvm_mmu_create(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4805
{
4806 4807 4808 4809
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
A
Avi Kivity 已提交
4810

4811 4812
	return alloc_mmu_pages(vcpu);
}
A
Avi Kivity 已提交
4813

4814
void kvm_mmu_setup(struct kvm_vcpu *vcpu)
4815
{
4816
	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4817

4818
	init_kvm_mmu(vcpu);
A
Avi Kivity 已提交
4819 4820
}

4821
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
4822 4823
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
4824 4825 4826 4827
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

4828 4829 4830 4831 4832
void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	node->track_write = kvm_mmu_pte_write;
4833
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
4834 4835 4836 4837 4838 4839 4840 4841 4842 4843
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);
}

4844
/* The return value indicates if tlb flush on all vcpus is needed. */
4845
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912

/* The caller should hold mmu-lock before calling this function. */
static bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;
}

static bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}

X
Xiao Guangrong 已提交
4913 4914 4915 4916
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
4917
	int i;
X
Xiao Guangrong 已提交
4918 4919

	spin_lock(&kvm->mmu_lock);
4920 4921 4922 4923 4924 4925 4926 4927 4928
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			gfn_t start, end;

			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;
X
Xiao Guangrong 已提交
4929

4930 4931 4932 4933
			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
						start, end - 1, true);
		}
X
Xiao Guangrong 已提交
4934 4935 4936 4937 4938
	}

	spin_unlock(&kvm->mmu_lock);
}

4939 4940
static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head)
4941
{
4942
	return __rmap_write_protect(kvm, rmap_head, false);
4943 4944
}

4945 4946
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot)
A
Avi Kivity 已提交
4947
{
4948
	bool flush;
A
Avi Kivity 已提交
4949

4950
	spin_lock(&kvm->mmu_lock);
4951 4952
	flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
				      false);
4953
	spin_unlock(&kvm->mmu_lock);
4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972

	/*
	 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
	 * which do tlb flush out of mmu-lock should be serialized by
	 * kvm->slots_lock otherwise tlb flush would be missed.
	 */
	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * We can flush all the TLBs out of the mmu lock without TLB
	 * corruption since we just change the spte from writable to
	 * readonly so that we only need to care the case of changing
	 * spte from present to present (changing the spte from present
	 * to nonpresent will flush all the TLBs immediately), in other
	 * words, the only case we care is mmu_spte_update() where we
	 * haved checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
	 * instead of PT_WRITABLE_MASK, that means it does not depend
	 * on PT_WRITABLE_MASK anymore.
	 */
4973 4974
	if (flush)
		kvm_flush_remote_tlbs(kvm);
A
Avi Kivity 已提交
4975
}
4976

4977
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
4978
					 struct kvm_rmap_head *rmap_head)
4979 4980 4981 4982
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
D
Dan Williams 已提交
4983
	kvm_pfn_t pfn;
4984 4985
	struct kvm_mmu_page *sp;

4986
restart:
4987
	for_each_rmap_spte(rmap_head, &iter, sptep) {
4988 4989 4990 4991
		sp = page_header(__pa(sptep));
		pfn = spte_to_pfn(*sptep);

		/*
4992 4993 4994 4995 4996
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table is using 4K page size
		 * mapping if the indirect sp has level = 1.
4997 4998 4999
		 */
		if (sp->role.direct &&
			!kvm_is_reserved_pfn(pfn) &&
5000
			PageTransCompoundMap(pfn_to_page(pfn))) {
5001 5002
			drop_spte(kvm, sptep);
			need_tlb_flush = 1;
5003 5004
			goto restart;
		}
5005 5006 5007 5008 5009 5010
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5011
				   const struct kvm_memory_slot *memslot)
5012
{
5013
	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5014
	spin_lock(&kvm->mmu_lock);
5015 5016
	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
			 kvm_mmu_zap_collapsible_spte, true);
5017 5018 5019
	spin_unlock(&kvm->mmu_lock);
}

5020 5021 5022
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
5023
	bool flush;
5024 5025

	spin_lock(&kvm->mmu_lock);
5026
	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
5045
	bool flush;
5046 5047

	spin_lock(&kvm->mmu_lock);
5048 5049
	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
					false);
5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062
	spin_unlock(&kvm->mmu_lock);

	/* see kvm_mmu_slot_remove_write_access */
	lockdep_assert_held(&kvm->slots_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot)
{
5063
	bool flush;
5064 5065

	spin_lock(&kvm->mmu_lock);
5066
	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
5067 5068 5069 5070 5071 5072 5073 5074 5075 5076
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/* see kvm_mmu_slot_leaf_clear_dirty */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

X
Xiao Guangrong 已提交
5077
#define BATCH_ZAP_PAGES	10
5078 5079 5080
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
X
Xiao Guangrong 已提交
5081
	int batch = 0;
5082 5083 5084 5085

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
X
Xiao Guangrong 已提交
5086 5087
		int ret;

5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102
		/*
		 * No obsolete page exists before new created page since
		 * active_mmu_pages is the FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Since we are reversely walking the list and the invalid
		 * list will be moved to the head, skip the invalid page
		 * can help us to avoid the infinity list walking.
		 */
		if (sp->role.invalid)
			continue;

5103 5104 5105 5106
		/*
		 * Need not flush tlb since we only zap the sp with invalid
		 * generation number.
		 */
X
Xiao Guangrong 已提交
5107
		if (batch >= BATCH_ZAP_PAGES &&
5108
		      cond_resched_lock(&kvm->mmu_lock)) {
X
Xiao Guangrong 已提交
5109
			batch = 0;
5110 5111 5112
			goto restart;
		}

5113 5114
		ret = kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages);
X
Xiao Guangrong 已提交
5115 5116 5117
		batch += ret;

		if (ret)
5118 5119 5120
			goto restart;
	}

5121 5122 5123 5124
	/*
	 * Should flush tlb before free page tables since lockless-walking
	 * may use the pages.
	 */
5125
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It's required when memslot is being deleted or VM is being
 * destroyed, in these cases, we should ensure that KVM MMU does
 * not use any resource of the being-deleted slot or all slots
 * after calling the function.
 */
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
	spin_lock(&kvm->mmu_lock);
5140
	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
5141 5142
	kvm->arch.mmu_valid_gen++;

5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153
	/*
	 * Notify all vcpus to reload its shadow page table
	 * and flush TLB. Then all vcpus will switch to new
	 * shadow page table with the new mmu_valid_gen.
	 *
	 * Note: we should do this under the protection of
	 * mmu-lock, otherwise, vcpu would purge shadow page
	 * but miss tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

5154 5155 5156 5157
	kvm_zap_obsolete_pages(kvm);
	spin_unlock(&kvm->mmu_lock);
}

5158 5159 5160 5161 5162
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

5163
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
5164 5165 5166 5167 5168
{
	/*
	 * The very rare case: if the generation-number is round,
	 * zap all shadow pages.
	 */
5169
	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
5170
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5171
		kvm_mmu_invalidate_zap_all_pages(kvm);
5172
	}
5173 5174
}

5175 5176
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5177 5178
{
	struct kvm *kvm;
5179
	int nr_to_scan = sc->nr_to_scan;
5180
	unsigned long freed = 0;
5181

5182
	spin_lock(&kvm_lock);
5183 5184

	list_for_each_entry(kvm, &vm_list, vm_list) {
5185
		int idx;
5186
		LIST_HEAD(invalid_list);
5187

5188 5189 5190 5191 5192 5193 5194 5195
		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
5196 5197 5198 5199 5200 5201
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance errorneosly, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
5202 5203
		if (!kvm->arch.n_used_mmu_pages &&
		      !kvm_has_zapped_obsolete_pages(kvm))
5204 5205
			continue;

5206
		idx = srcu_read_lock(&kvm->srcu);
5207 5208
		spin_lock(&kvm->mmu_lock);

5209 5210 5211 5212 5213 5214
		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

5215 5216
		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			freed++;
5217
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
5218

5219
unlock:
5220
		spin_unlock(&kvm->mmu_lock);
5221
		srcu_read_unlock(&kvm->srcu, idx);
5222

5223 5224 5225 5226 5227
		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
5228 5229
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
5230 5231
	}

5232
	spin_unlock(&kvm_lock);
5233 5234 5235 5236 5237 5238
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
5239
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5240 5241 5242
}

static struct shrinker mmu_shrinker = {
5243 5244
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
5245 5246 5247
	.seeks = DEFAULT_SEEKS * 10,
};

I
Ingo Molnar 已提交
5248
static void mmu_destroy_caches(void)
5249
{
5250 5251
	if (pte_list_desc_cache)
		kmem_cache_destroy(pte_list_desc_cache);
5252 5253
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
5254 5255 5256 5257
}

int kvm_mmu_module_init(void)
{
5258 5259
	kvm_mmu_clear_all_pte_masks();

5260 5261
	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
5262
					    0, 0, NULL);
5263
	if (!pte_list_desc_cache)
5264 5265
		goto nomem;

5266 5267
	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
5268
						  0, 0, NULL);
5269 5270 5271
	if (!mmu_page_header_cache)
		goto nomem;

5272
	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5273 5274
		goto nomem;

5275 5276
	register_shrinker(&mmu_shrinker);

5277 5278 5279
	return 0;

nomem:
5280
	mmu_destroy_caches();
5281 5282 5283
	return -ENOMEM;
}

5284 5285 5286 5287 5288 5289 5290
/*
 * Caculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;
5291
	struct kvm_memslots *slots;
5292
	struct kvm_memory_slot *memslot;
5293
	int i;
5294

5295 5296
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
5297

5298 5299 5300
		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}
5301 5302 5303

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
5304
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
5305 5306 5307 5308

	return nr_mmu_pages;
}

5309 5310
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
5311
	kvm_mmu_unload(vcpu);
5312 5313
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
5314 5315 5316 5317 5318 5319 5320
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
5321 5322
	mmu_audit_disable();
}