/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>

#include <asm/page.h>
#include <asm/pat.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

/*
 * When set to true, this variable enables Two-Dimensional Paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical translation
 * 2. while doing 1., the guest-physical to host-physical translation
 * If the hardware supports TDP, we do not need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

/*
 * Return values of handle_mmio_page_fault and mmu.page_fault:
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 *
 * For handle_mmio_page_fault only:
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE = 1,
	RET_PF_INVALID = 2,
};

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

static const union kvm_mmu_page_role mmu_base_role_mask = {
	.cr0_wp = 1,
	.cr4_pae = 1,
	.nxe = 1,
	.smep_andnot_wp = 1,
	.smap_andnot_wp = 1,
	.smm = 1,
	.guest_mode = 1,
	.ad_disabled = 1,
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;

/*
 * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
 * Non-present SPTEs with shadow_acc_track_value set are in place for access
 * tracking.
 */
static u64 __read_mostly shadow_acc_track_mask;
static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
	BUG_ON((mmio_mask & mmio_value) != mmio_value);
	shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
	return !(spte & shadow_acc_track_value);
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

/*
 * the low bit of the generation number is always presumed to be zero.
 * This disables mmio caching during memslot updates.  The concept is
 * similar to a seqcount but instead of retrying the access we just punt
 * and ignore the cache.
 *
 * spte bits 3-11 are used as bits 1-9 of the generation number,
 * the bits 52-61 are used as bits 10-19 of the generation number.
 */
#define MMIO_SPTE_GEN_LOW_SHIFT		2
#define MMIO_SPTE_GEN_HIGH_SHIFT	52

#define MMIO_GEN_SHIFT			20
#define MMIO_GEN_LOW_SHIFT		10
#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)

static u64 generation_mmio_spte_mask(unsigned int gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_GEN_MASK);

	mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

static unsigned int get_mmio_spte_generation(u64 spte)
{
	unsigned int gen;

	spte &= ~shadow_mmio_mask;

	gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
	gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
	return gen;
}
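
/*
 * Worked example of the encoding above (illustrative only): for gen = 0x3ff,
 * bits 1-9 of the generation are placed at spte bits 3-11 by
 * generation_mmio_spte_mask(), and bit 0 is dropped because the low bit of
 * the generation is presumed to be zero; for gen = 0x400 only bit 10 is set,
 * which lands in spte bit 52.
 */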

static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned access)
{
	unsigned int gen = kvm_current_mmio_generation(vcpu);
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= ACC_WRITE_MASK | ACC_USER_MASK;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< shadow_nonpresent_or_rsvd_mask_len;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
}

static bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value;
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
		   shadow_nonpresent_or_rsvd_mask;
	u64 gpa = spte & ~mask;

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
	return (spte & ~mask) & ~PAGE_MASK;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	unsigned int kvm_gen, spte_gen;

	kvm_gen = kvm_current_mmio_generation(vcpu);
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & shadow_acc_track_value);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static void kvm_mmu_reset_all_pte_masks(void)
{
	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 */
	if (boot_cpu_data.x86_phys_bits <
	    52 - shadow_nonpresent_or_rsvd_mask_len)
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(boot_cpu_data.x86_phys_bits -
				  shadow_nonpresent_or_rsvd_mask_len,
				  boot_cpu_data.x86_phys_bits - 1);
}
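
/*
 * Worked example for the L1TF guard above (illustrative): with
 * boot_cpu_data.x86_phys_bits == 46 and a mask length of 5, the check
 * "46 < 52 - 5" holds and shadow_nonpresent_or_rsvd_mask ends up covering
 * physical-address bits 41-45, i.e. rsvd_bits(41, 45).
 */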

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return (pte != 0) && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, We should store
	 * the high bits firstly, then set present bit, so cpu can not
	 * fetch this spte while we are setting the spte.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * present bit firstly to avoid vcpu fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to get the spte on x86_32 guests
 * comes from gup_get_pte(arch/x86/mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running out of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif
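
/*
 * Summary note (not from the original comments): on 32-bit hosts an spte is
 * two 32-bit halves, so __get_spte_lockless() above re-reads the spte
 * whenever spte_low changed or clear_spte_count advanced, i.e. whenever a
 * present->non-present update may have raced with the read.
 */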

static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always atomically update the spte if it can be updated
	 * out of mmu-lock: this ensures the dirty bit is not lost,
	 * and it also helps us to get a stable is_writable_pte()
	 * so that a needed tlb flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

static bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte out of mmu-lock is safe, since
	 * we always update it atomically; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush TLB when accessed/dirty states are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and tracks the
 * state bits; it is used to clear the last level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

static u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE. So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
			  shadow_acc_track_saved_bits_shift),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
		shadow_acc_track_saved_bits_shift;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
			 & shadow_acc_track_saved_bits_mask;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
		      shadow_acc_track_saved_bits_shift);
	new_spte |= saved_bits;

	return new_spte;
}
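
/*
 * Worked example (illustrative): mark_spte_for_access_track() copies the
 * original R bit (bit 0) to bit 52 and the X bit (bit 2) to bit 54, then
 * clears the bits covered by shadow_acc_track_mask; restore_acc_track_spte()
 * shifts those saved copies back down and clears the saved area again.
 */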

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
	return cache->nobjs;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	void *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
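
/*
 * Usage note (summary, not from the original comments): the caches above are
 * topped up by mmu_topup_memory_caches() before the MMU starts walking or
 * modifying shadow page tables, so mmu_memory_cache_alloc() can hand out an
 * object without sleeping; BUG_ON(!mc->nobjs) enforces that callers really
 * did the topup first.
 */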

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}
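
/*
 * Note (summary): disallow_lpage is a counter, not a flag; e.g.
 * account_shadowed() below bumps it for the gfn backing each 4K shadow page,
 * and __mmu_gfn_lpage_is_disallowed() refuses a huge mapping while it is
 * non-zero for the region.
 */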

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* the non-leaf shadow pages are kept read-only. */
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
					  struct kvm_memory_slot *slot)
{
	struct kvm_lpage_info *linfo;

	if (slot) {
		linfo = lpage_info_slot(gfn, slot, level);
		return !!linfo->disallow_lpage;
	}

	return true;
}

static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
					int level)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
					  bool no_dirty_log)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return false;
	if (no_dirty_log && slot->dirty_bitmap)
		return false;

	return true;
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!memslot_valid_for_gpte(slot, no_dirty_log))
		slot = NULL;

	return slot;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
			 bool *force_pt_level)
{
	int host_level, level, max_level;
	struct kvm_memory_slot *slot;

	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
	*force_pt_level = !memslot_valid_for_gpte(slot, true);
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
			break;

	return level - 1;
}
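
/*
 * Worked example (illustrative): if the host backs large_gfn with a 2MB
 * page, host_mapping_level() returns PT_DIRECTORY_LEVEL; if the vendor
 * module allows 2MB pages and no gfn in that region has disallow_lpage
 * set, mapping_level() also returns PT_DIRECTORY_LEVEL, otherwise it
 * falls back to PT_PAGE_TABLE_LEVEL.
 */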

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!rmap_head->val) {
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			desc = desc->more;
			count += PTE_LIST_EXT;
		}
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}
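
/*
 * Illustrative example of the encoding handled above: after a single
 * rmap_add() the rmap_head holds the spte pointer itself (bit 0 clear);
 * adding a second spte allocates a pte_list_desc, moves the first spte into
 * desc->sptes[0], stores the new one in desc->sptes[1] and sets
 * rmap_head->val = (unsigned long)desc | 1; further sptes fill the remaining
 * slot and then chain new descriptors via desc->more.
 */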

static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		rmap_head->val = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_remove:  %p 1->0\n", spte);
		if ((u64 *)rmap_head->val != spte) {
			printk(KERN_ERR "pte_list_remove:  %p 1->BUG\n", spte);
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("pte_list_remove:  %p many->many\n", spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("pte_list_remove: %p many->many\n", spte);
		BUG();
	}
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					 struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(gfn, sp->role.level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache;

	cache = &vcpu->arch.mmu_pte_list_desc_cache;
	return mmu_memory_cache_free_objects(cache);
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
	pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	u64 *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		sptep = (u64 *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	u64 *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))
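
/*
 * Usage note: for_each_rmap_spte() visits every spte currently mapping the
 * gfn behind @_rmap_head_.  If an spte is dropped during the walk the
 * iterator must be restarted via rmap_get_first(), as kvm_zap_rmapp() and
 * kvm_set_pte_rmapp() below do.
 */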

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	if (mmu_spte_clear_track_bits(sptep))
		rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(page_header(__pa(sptep))->role.level ==
			PT_PAGE_TABLE_LEVEL);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
}

/*
 * Write-protect on the specified @sptep, @pt_protect indicates whether
 * spte write-protection is caused by protecting shadow page table.
 *
 * Note: write protection is different for dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
 *
 * Return true if the tlb needs to be flushed
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

	spte &= ~shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool wrprot_ad_disabled_spte(u64 *sptep)
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
	if (was_writable)
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_enabled(*sptep))
			flush |= spte_clear_dirty(sptep);
		else
			flush |= wrprot_ad_disabled_spte(sptep);

	return flush;
}

static bool spte_set_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_enabled(*sptep))
			flush |= spte_set_dirty(sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
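
/*
 * Worked example (illustrative): for gfn_offset == 0 and mask == 0x5, the
 * loop above write-protects the 4K sptes of slot->base_gfn + 0 and
 * slot->base_gfn + 2; "mask &= mask - 1" clears the lowest set bit on each
 * iteration, so the loop runs once per set bit in the dirty-bitmap word.
 */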

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

/**
 * kvm_arch_write_log_dirty - emulate dirty page logging
 * @vcpu: Guest mode vcpu
 *
 * Emulate arch specific page modification logging for the
 * nested hypervisor
 */
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->write_log_dirty)
		return kvm_x86_ops->write_log_dirty(vcpu);

	return 0;
}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	while ((sptep = rmap_get_first(rmap_head, &iter))) {
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);

		drop_spte(kvm, sptep);
		flush = true;
	}

	return flush;
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}

static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			drop_spte(kvm, sptep);
			goto restart;
		} else {
			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;

			new_spte = mark_spte_for_access_track(new_spte);

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
					       struct kvm_rmap_head *rmap_head,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn,
					       int level,
					       unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
						 PT_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
1840 1841
	}

1842
	return ret;
1843 1844
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn, int level,
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

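/*
 * The hva-based entry points below (kvm_unmap_hva_range, kvm_set_spte_hva,
 * kvm_age_hva, kvm_test_age_hva) are the arch-side MMU notifier hooks.
 * Each is a thin wrapper: kvm_handle_hva_range() translates the host
 * virtual range into per-memslot gfn ranges and applies the matching
 * *_rmapp handler to every affected rmap bucket.
 */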
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator uninitialized_var(iter);
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * The active_mmu_pages list is the FIFO list, do not move the
	 * page until it is zapped. kvm_zap_obsolete_pages depends on
	 * this feature. See the comments in kvm_zap_obsolete_pages().
	 */
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = page_header(__pa(spte));
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
{
}

static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,
				 const void *pte)
{
	WARN_ON(1);
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

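/*
 * __mmu_unsync_walk() fills a kvm_mmu_pages vector by depth-first
 * walking unsync_child_bitmap from a parent shadow page: each entry
 * records an unsync page (or a page that still has unsync children)
 * together with the index of the parent PTE through which it was
 * reached, so mmu_pages_clear_parents() can later clear those bits back
 * up the chain.  The vector is deliberately small (KVM_PAGE_ARRAY_NR);
 * when it fills up, the caller processes the batch and walks again.
 */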
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

/*
 * NOTE: we should pay more attention to the zapped-obsolete page
 * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk,
 * since it has been deleted from active_mmu_pages but can still be
 * found in the hash list.
 *
 * for_each_valid_sp() has skipped that kind of page.
 */
#define for_each_valid_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
{
	if (sp->role.cr4_pae != !!is_pae(vcpu)
	    || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (!list_empty(invalid_list)) {
		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
		return;
	}

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

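/*
 * A shadow page whose mmu_valid_gen lags kvm->arch.mmu_valid_gen is
 * "obsolete": kvm_mmu_invalidate_zap_all_pages() bumps the generation
 * so every existing shadow page becomes stale at once, and the stale
 * pages are then skipped by for_each_valid_sp() and reclaimed lazily
 * instead of being zapped in one long critical section.
 */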
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
}

/* @gfn should be write-protected at the call site */
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
{
	struct kvm_mmu_page *s;
	bool ret = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		ret |= kvm_sync_page(vcpu, s, invalid_list);
	}

	return ret;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PT_PAGE_TABLE_LEVEL);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

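/*
 * Flow of mmu_sync_children(): mmu_unsync_walk() returns a batch of
 * unsync pages reachable from @parent; their gfns are write-protected
 * (flushing remote TLBs if any protection was added), each page is then
 * validated or zapped via kvm_sync_page(), and mmu_pages_clear_parents()
 * clears the unsync_child_bitmap bits on the path back to @parent.
 * The loop repeats until no unsync child remains.
 */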
static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count,  0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(spte));

	__clear_sp_write_flooding_count(sp);
}

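/*
 * kvm_mmu_get_page() is effectively a lookup-or-create keyed on
 * (gfn, role): hash-chain collisions and role mismatches fall through
 * to allocation, unsync hits are re-synced in place, and the
 * max_mmu_page_hash_collisions stat records the worst chain seen.
 */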
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned access)
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	bool need_sync = false;
	bool flush = false;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu.base_role;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
	if (!vcpu->arch.mmu.direct_map
	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	for_each_valid_sp(vcpu->kvm, sp, gfn) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (!need_sync && sp->unsync)
			need_sync = true;

		if (sp->role.word != role.word)
			continue;

		if (sp->unsync) {
			/* The page is good, but __kvm_sync_page might still end
			 * up zapping it.  If so, break in order to rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		if (sp->unsync_children)
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);

		__clear_sp_write_flooding_count(sp);
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
	if (!direct) {
		/*
		 * we should do write protection before syncing pages
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PT_PAGE_TABLE_LEVEL &&
		      rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);

		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
	}
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	clear_page(sp->spt);
	trace_kvm_mmu_get_page(sp, true);

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

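/*
 * Shadow page-table walk cursor.  shadow_walk_init() positions the
 * iterator at the active root (or at the indexed PAE root for 32-bit
 * guests) and __shadow_walk_next() descends one level per step; the
 * for_each_shadow_entry*() walks used by __direct_map() and
 * fast_page_fault() below are driven by these helpers.
 */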
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
2441 2442
{
	iterator->addr = addr;
2443
	iterator->shadow_addr = root;
2444
	iterator->level = vcpu->arch.mmu.shadow_root_level;
2445

2446 2447
	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
2448 2449 2450
	    !vcpu->arch.mmu.direct_map)
		--iterator->level;

2451
	if (iterator->level == PT32E_ROOT_LEVEL) {
2452 2453 2454 2455 2456 2457
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu.root_hpa);

2458 2459 2460 2461 2462 2463 2464 2465 2466
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

2467 2468 2469 2470 2471 2472 2473
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
				    addr);
}

2474 2475 2476 2477
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;
2478

2479 2480 2481 2482 2483
	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

2484 2485
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
2486
{
2487
	if (is_last_spte(spte, iterator->level)) {
2488 2489 2490 2491
		iterator->level = 0;
		return;
	}

2492
	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2493 2494 2495
	--iterator->level;
}

2496 2497
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
2498
	__shadow_walk_next(iterator, *iterator->sptep);
2499 2500
}

2501 2502
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
2503 2504 2505
{
	u64 spte;

2506
	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2507

2508
	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2509
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
2510 2511 2512 2513 2514

	if (sp_ad_disabled(sp))
		spte |= shadow_acc_track_value;
	else
		spte |= shadow_accessed_mask;
X
Xiao Guangrong 已提交
2515

2516
	mmu_spte_set(sptep, spte);
2517 2518 2519 2520 2521

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
2522 2523
}

2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

2541
		drop_parent_pte(child, sptep);
2542 2543 2544 2545
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

X
Xiao Guangrong 已提交
2546
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2547 2548 2549 2550 2551 2552 2553
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
X
Xiao Guangrong 已提交
2554
		if (is_last_spte(pte, sp->role.level)) {
2555
			drop_spte(kvm, spte);
X
Xiao Guangrong 已提交
2556 2557 2558
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
2559
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2560
			drop_parent_pte(child, spte);
2561
		}
X
Xiao Guangrong 已提交
2562 2563 2564 2565
		return true;
	}

	if (is_mmio_spte(pte))
2566
		mmu_spte_clear_no_track(spte);
2567

X
Xiao Guangrong 已提交
2568
	return false;
2569 2570
}

2571
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2572
					 struct kvm_mmu_page *sp)
2573
{
2574 2575
	unsigned i;

2576 2577
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
2578 2579
}

2580
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2581
{
2582 2583
	u64 *sptep;
	struct rmap_iterator iter;
2584

2585
	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2586
		drop_parent_pte(sp, sptep);
2587 2588
}

2589
static int mmu_zap_unsync_children(struct kvm *kvm,
2590 2591
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
2592
{
2593 2594 2595
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2596

2597
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2598
		return 0;
2599 2600 2601 2602 2603

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
2604
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2605
			mmu_pages_clear_parents(&parents);
2606
			zapped++;
2607 2608 2609 2610
		}
	}

	return zapped;
2611 2612
}

2613 2614
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
2615
{
2616
	int ret;
A
Avi Kivity 已提交
2617

2618
	trace_kvm_mmu_prepare_zap_page(sp);
2619
	++kvm->stat.mmu_shadow_zapped;
2620
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2621
	kvm_mmu_page_unlink_children(kvm, sp);
2622
	kvm_mmu_unlink_parents(kvm, sp);
2623

2624
	if (!sp->role.invalid && !sp->role.direct)
2625
		unaccount_shadowed(kvm, sp);
2626

2627 2628
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
2629
	if (!sp->root_count) {
2630 2631
		/* Count self */
		ret++;
2632
		list_move(&sp->link, invalid_list);
2633
		kvm_mod_used_mmu_pages(kvm, -1);
2634
	} else {
A
Avi Kivity 已提交
2635
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
2636 2637 2638 2639 2640 2641 2642

		/*
		 * The obsolete pages cannot be used by any vcpu.
		 * See the comments in kvm_mmu_invalidate_zap_all_pages().
		 */
		if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
2643
	}
2644 2645

	sp->role.invalid = 1;
2646
	return ret;
2647 2648
}

2649 2650 2651
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
2652
	struct kvm_mmu_page *sp, *nsp;
2653 2654 2655 2656

	if (list_empty(invalid_list))
		return;

2657
	/*
2658 2659 2660 2661 2662 2663 2664
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
2665 2666
	 */
	kvm_flush_remote_tlbs(kvm);
2667

2668
	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2669
		WARN_ON(!sp->role.invalid || sp->root_count);
2670
		kvm_mmu_free_page(sp);
2671
	}
2672 2673
}

2674 2675 2676 2677 2678 2679 2680 2681
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
					struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return false;

G
Geliang Tang 已提交
2682 2683
	sp = list_last_entry(&kvm->arch.active_mmu_pages,
			     struct kvm_mmu_page, link);
2684
	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2685 2686
}

2687 2688
/*
 * Changing the number of mmu pages allocated to the vm
2689
 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2690
 */
2691
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2692
{
2693
	LIST_HEAD(invalid_list);
2694

2695 2696
	spin_lock(&kvm->mmu_lock);

2697
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2698 2699 2700 2701
		/* Need to free some mmu pages to achieve the goal. */
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
				break;
2702

2703
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
2704
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2705 2706
	}

2707
	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2708 2709

	spin_unlock(&kvm->mmu_lock);
2710 2711
}

2712
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2713
{
2714
	struct kvm_mmu_page *sp;
2715
	LIST_HEAD(invalid_list);
2716 2717
	int r;

2718
	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2719
	r = 0;
2720
	spin_lock(&kvm->mmu_lock);
2721
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2722
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2723 2724
			 sp->role.word);
		r = 1;
2725
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2726
	}
2727
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2728 2729
	spin_unlock(&kvm->mmu_lock);

2730
	return r;
2731
}
2732
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2733

2734
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2735 2736 2737 2738 2739 2740 2741 2742
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

2743 2744
static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				   bool can_unsync)
2745
{
2746
	struct kvm_mmu_page *sp;
2747

2748 2749
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;
2750

2751
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2752
		if (!can_unsync)
2753
			return true;
2754

2755 2756
		if (sp->unsync)
			continue;
2757

2758 2759
		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unsync_page(vcpu, sp);
2760
	}
2761

2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800
	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
	 *                          Since it is false, so it just returns.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          so the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

2801
	return false;
2802 2803
}

D
Dan Williams 已提交
2804
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2805 2806
{
	if (pfn_valid(pfn))
2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping them with UC
			 * memory type, which would hurt the performance.
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
2819 2820 2821 2822

	return true;
}

2823 2824 2825 2826
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)

A
Avi Kivity 已提交
2827
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2828
		    unsigned pte_access, int level,
D
Dan Williams 已提交
2829
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2830
		    bool can_unsync, bool host_writable)
2831
{
2832
	u64 spte = 0;
M
Marcelo Tosatti 已提交
2833
	int ret = 0;
2834
	struct kvm_mmu_page *sp;
S
Sheng Yang 已提交
2835

2836
	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2837 2838
		return 0;

2839 2840 2841 2842
	sp = page_header(__pa(sptep));
	if (sp_ad_disabled(sp))
		spte |= shadow_acc_track_value;

2843 2844 2845 2846 2847 2848
	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
2849
	spte |= shadow_present_mask;
2850
	if (!speculative)
2851
		spte |= spte_shadow_accessed_mask(spte);
2852

S
Sheng Yang 已提交
2853 2854 2855 2856
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
2857

2858
	if (pte_access & ACC_USER_MASK)
S
Sheng Yang 已提交
2859
		spte |= shadow_user_mask;
2860

2861
	if (level > PT_PAGE_TABLE_LEVEL)
M
Marcelo Tosatti 已提交
2862
		spte |= PT_PAGE_SIZE_MASK;
2863
	if (tdp_enabled)
2864
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2865
			kvm_is_mmio_pfn(pfn));
2866

2867
	if (host_writable)
2868
		spte |= SPTE_HOST_WRITEABLE;
2869 2870
	else
		pte_access &= ~ACC_WRITE_MASK;
2871

2872 2873 2874
	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

2875
	spte |= (u64)pfn << PAGE_SHIFT;
2876

2877
	if (pte_access & ACC_WRITE_MASK) {
2878

X
Xiao Guangrong 已提交
2879
		/*
2880 2881 2882 2883
		 * Other vcpu creates new sp in the window between
		 * mapping_level() and acquiring mmu-lock. We can
		 * allow guest to retry the access, the mapping can
		 * be fixed if guest refault.
X
Xiao Guangrong 已提交
2884
		 */
2885
		if (level > PT_PAGE_TABLE_LEVEL &&
2886
		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
A
Avi Kivity 已提交
2887
			goto done;
2888

2889
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
2890

2891 2892 2893 2894 2895 2896
		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
2897
		if (!can_unsync && is_writable_pte(*sptep))
2898 2899
			goto set_pte;

2900
		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2901
			pgprintk("%s: found shadow page for %llx, marking ro\n",
2902
				 __func__, gfn);
2903
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
2904
			pte_access &= ~ACC_WRITE_MASK;
2905
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
2906 2907 2908
		}
	}

2909
	if (pte_access & ACC_WRITE_MASK) {
2910
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2911
		spte |= spte_shadow_dirty_mask(spte);
2912
	}
2913

2914 2915 2916
	if (speculative)
		spte = mark_spte_for_access_track(spte);

2917
set_pte:
2918
	if (mmu_spte_update(sptep, spte))
2919
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
A
Avi Kivity 已提交
2920
done:
M
Marcelo Tosatti 已提交
2921 2922 2923
	return ret;
}

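/*
 * mmu_set_spte() below layers the page-fault policy on top of
 * set_spte(): SET_SPTE_WRITE_PROTECTED_PT on a write fault turns into
 * RET_PF_EMULATE plus a local TLB-flush request, and
 * SET_SPTE_NEED_REMOTE_TLB_FLUSH (or dropping an existing mapping)
 * triggers kvm_flush_remote_tlbs().
 */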
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
		       	bool speculative, bool host_writable)
M
Marcelo Tosatti 已提交
2927 2928
{
	int was_rmapped = 0;
2929
	int rmap_count;
2930
	int set_spte_ret;
2931
	int ret = RET_PF_RETRY;
2932
	bool flush = false;
M
Marcelo Tosatti 已提交
2933

2934 2935
	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);
M
Marcelo Tosatti 已提交
2936

2937
	if (is_shadow_present_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2938 2939 2940 2941
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
2942 2943
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2944
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
2945
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
2946 2947

			child = page_header(pte & PT64_BASE_ADDR_MASK);
2948
			drop_parent_pte(child, sptep);
2949
			flush = true;
A
Avi Kivity 已提交
2950
		} else if (pfn != spte_to_pfn(*sptep)) {
2951
			pgprintk("hfn old %llx new %llx\n",
A
Avi Kivity 已提交
2952
				 spte_to_pfn(*sptep), pfn);
2953
			drop_spte(vcpu->kvm, sptep);
2954
			flush = true;
2955 2956
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
2957
	}
2958

2959 2960 2961
	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
M
Marcelo Tosatti 已提交
2962
		if (write_fault)
2963
			ret = RET_PF_EMULATE;
2964
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2965
	}
2966
	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2967
		kvm_flush_remote_tlbs(vcpu->kvm);
M
Marcelo Tosatti 已提交
2968

2969
	if (unlikely(is_mmio_spte(*sptep)))
2970
		ret = RET_PF_EMULATE;
2971

A
Avi Kivity 已提交
2972
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2973
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
A
Avi Kivity 已提交
2974
		 is_large_pte(*sptep)? "2MB" : "4kB",
2975
		 *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
2976
		 *sptep, sptep);
A
Avi Kivity 已提交
2977
	if (!was_rmapped && is_large_pte(*sptep))
M
Marcelo Tosatti 已提交
2978 2979
		++vcpu->kvm->stat.lpages;

2980 2981 2982 2983 2984 2985
	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
2986
	}
2987

X
Xiao Guangrong 已提交
2988
	kvm_release_pfn_clean(pfn);
2989

2990
	return ret;
2991 2992
}

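/*
 * The helpers below implement speculative prefetch on direct faults:
 * __direct_pte_prefetch() scans the PTE_PREFETCH_NUM-aligned window
 * containing the faulting sptep and maps any still-empty slots whose
 * pfns can be resolved without sleeping (gfn_to_page_many_atomic).
 * Prefetch is skipped when A/D bits are disabled, since prefetched and
 * genuinely accessed translations could then no longer be told apart.
 */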
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

2998
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2999
	if (!slot)
3000
		return KVM_PFN_ERR_FAULT;
3001

3002
	return gfn_to_pfn_memslot_atomic(slot, gfn);
3003 3004 3005 3006 3007 3008 3009
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
3010
	struct kvm_memory_slot *slot;
3011 3012 3013 3014 3015
	unsigned access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
3016 3017
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
3018 3019
		return -1;

3020
	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
3021 3022 3023 3024
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++)
3025 3026
		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3043
		if (is_shadow_present_pte(*spte) || spte == sptep) {
3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

3058 3059
	sp = page_header(__pa(sptep));

3060
	/*
3061 3062 3063
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
3064
	 */
3065
	if (sp_ad_disabled(sp))
3066 3067 3068 3069 3070 3071 3072 3073
		return;

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

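/*
 * __direct_map(): for TDP / direct roots, walk down to @level, creating
 * any missing intermediate shadow pages with kvm_mmu_get_page(), then
 * install the leaf spte with mmu_set_spte(), whose result is passed
 * back to the caller.
 */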
static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
			int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
3076
{
3077
	struct kvm_shadow_walk_iterator iterator;
3078
	struct kvm_mmu_page *sp;
3079
	int emulate = 0;
3080
	gfn_t pseudo_gfn;
A
Avi Kivity 已提交
3081

3082 3083 3084
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return 0;

3085
	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
3086
		if (iterator.level == level) {
3087 3088 3089
			emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
					       write, level, gfn, pfn, prefault,
					       map_writable);
3090
			direct_pte_prefetch(vcpu, iterator.sptep);
3091 3092
			++vcpu->stat.pf_fixed;
			break;
A
Avi Kivity 已提交
3093 3094
		}

3095
		drop_large_spte(vcpu, iterator.sptep);
3096
		if (!is_shadow_present_pte(*iterator.sptep)) {
3097 3098 3099 3100
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
3101
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
3102
					      iterator.level - 1, 1, ACC_ALL);
3103

3104
			link_shadow_page(vcpu, iterator.sptep, sp);
3105 3106
		}
	}
3107
	return emulate;
A
Avi Kivity 已提交
3108 3109
}

H
Huang Ying 已提交
3110
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3111
{
H
Huang Ying 已提交
3112 3113
	siginfo_t info;

3114
	clear_siginfo(&info);
H
Huang Ying 已提交
3115 3116 3117 3118 3119
	info.si_signo	= SIGBUS;
	info.si_errno	= 0;
	info.si_code	= BUS_MCEERR_AR;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = PAGE_SHIFT;
3120

H
Huang Ying 已提交
3121
	send_sig_info(SIGBUS, &info, tsk);
3122 3123
}

D
Dan Williams 已提交
3124
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3125
{
X
Xiao Guangrong 已提交
3126 3127 3128 3129 3130 3131
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte otherwise read access on readonly gfn also can
	 * caused mmio page fault and treat it as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
3132
		return RET_PF_EMULATE;
X
Xiao Guangrong 已提交
3133

3134
	if (pfn == KVM_PFN_ERR_HWPOISON) {
3135
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3136
		return RET_PF_RETRY;
3137
	}
3138

3139
	return -EFAULT;
3140 3141
}

3142
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
D
Dan Williams 已提交
3143 3144
					gfn_t *gfnp, kvm_pfn_t *pfnp,
					int *levelp)
3145
{
D
Dan Williams 已提交
3146
	kvm_pfn_t pfn = *pfnp;
3147 3148 3149 3150 3151 3152 3153 3154 3155
	gfn_t gfn = *gfnp;
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this would be an
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
	 * here.
	 */
3156
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
3157
	    level == PT_PAGE_TABLE_LEVEL &&
3158
	    PageTransCompoundMap(pfn_to_page(pfn)) &&
3159
	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177
		unsigned long mask;
		/*
		 * mmu_notifier_retry was successful and we hold the
		 * mmu_lock here, so the pmd can't become splitting
		 * from under us, and in turn
		 * __split_huge_page_refcount() can't run from under
		 * us and we can safely transfer the refcount from
		 * PG_tail to PG_head as we switch the pfn to tail to
		 * head.
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			gfn &= ~mask;
			*gfnp = gfn;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
3178
			kvm_get_pfn(pfn);
3179 3180 3181 3182 3183
			*pfnp = pfn;
		}
	}
}

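/*
 * Illustrative values for the adjustment above, assuming a 2MB THP
 * backing: mask = KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1 = 0x1ff,
 * so gfn 0x12345 and pfn 0xabd45 (which agree in their low 9 bits, as
 * the VM_BUG_ON requires) are aligned down together to 0x12200 and
 * 0xabc00, and the resulting large spte maps a properly aligned 2MB
 * region.
 */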
3184
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
D
Dan Williams 已提交
3185
				kvm_pfn_t pfn, unsigned access, int *ret_val)
3186 3187
{
	/* The pfn is invalid, report the error! */
3188
	if (unlikely(is_error_pfn(pfn))) {
3189
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3190
		return true;
3191 3192
	}

3193
	if (unlikely(is_noslot_pfn(pfn)))
3194 3195
		vcpu_cache_mmio_info(vcpu, gva, gfn, access);

3196
	return false;
3197 3198
}

3199
static bool page_fault_can_be_fast(u32 error_code)
3200
{
3201 3202 3203 3204 3205 3206 3207
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

3208 3209 3210 3211 3212
	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

3213
	/*
3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3225 3226
	 */

3227 3228 3229
	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3230 3231
}

3232 3233 3234 3235
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3236
static bool
3237
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3238
			u64 *sptep, u64 old_spte, u64 new_spte)
3239 3240 3241 3242 3243
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3256
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3257 3258
		return false;

3259
	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3260 3261 3262 3263 3264 3265 3266
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
3267 3268 3269 3270

	return true;
}

3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282
static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

3283 3284 3285 3286 3287 3288 3289 3290 3291
/*
 * Return value:
 * - true: let the vcpu to access on the same address again.
 * - false: let the real page fault path to fix it.
 */
static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
3292
	struct kvm_mmu_page *sp;
3293
	bool fault_handled = false;
3294
	u64 spte = 0ull;
3295
	uint retry_count = 0;
3296

3297 3298 3299
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return false;

3300
	if (!page_fault_can_be_fast(error_code))
3301 3302 3303 3304
		return false;

	walk_shadow_page_lockless_begin(vcpu);

3305
	do {
3306
		u64 new_spte;
3307

3308 3309 3310 3311 3312
		for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
			if (!is_shadow_present_pte(spte) ||
			    iterator.level < level)
				break;

3313 3314 3315
		sp = page_header(__pa(iterator.sptep));
		if (!is_last_spte(spte, sp->role.level))
			break;
3316

3317
		/*
3318 3319 3320 3321 3322
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3323 3324 3325 3326
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3327 3328 3329 3330
		if (is_access_allowed(error_code, spte)) {
			fault_handled = true;
			break;
		}
3331

3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345
		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
		    spte_can_locklessly_be_made_writable(spte))
		{
			new_spte |= PT_WRITABLE_MASK;
3346 3347

			/*
3348 3349 3350 3351 3352 3353 3354 3355 3356
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
3357
			 */
3358
			if (sp->role.level > PT_PAGE_TABLE_LEVEL)
3359
				break;
3360
		}
3361

3362
		/* Verify that the fault can be handled in the fast path */
3363 3364
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
3365 3366 3367 3368 3369 3370 3371 3372
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virtual/kvm/locking.txt to get more detail.
		 */
		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3373
							iterator.sptep, spte,
3374
							new_spte);
3375 3376 3377 3378 3379 3380 3381 3382 3383 3384
		if (fault_handled)
			break;

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);
3385

X
Xiao Guangrong 已提交
3386
	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
3387
			      spte, fault_handled);
3388 3389
	walk_shadow_page_lockless_end(vcpu);

3390
	return fault_handled;
3391 3392
}

3393
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
D
Dan Williams 已提交
3394
			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
3395
static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
3396

3397 3398
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
			 gfn_t gfn, bool prefault)
3399 3400
{
	int r;
3401
	int level;
3402
	bool force_pt_level = false;
D
Dan Williams 已提交
3403
	kvm_pfn_t pfn;
3404
	unsigned long mmu_seq;
3405
	bool map_writable, write = error_code & PFERR_WRITE_MASK;
3406

3407
	level = mapping_level(vcpu, gfn, &force_pt_level);
3408 3409 3410 3411 3412 3413 3414 3415
	if (likely(!force_pt_level)) {
		/*
		 * This path builds a PAE pagetable - so we can map
		 * 2mb pages at maximum. Therefore check if the level
		 * is larger than that.
		 */
		if (level > PT_DIRECTORY_LEVEL)
			level = PT_DIRECTORY_LEVEL;
3416

3417
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3418
	}
M
Marcelo Tosatti 已提交
3419

3420
	if (fast_page_fault(vcpu, v, level, error_code))
3421
		return RET_PF_RETRY;
3422

3423
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3424
	smp_rmb();
3425

3426
	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
3427
		return RET_PF_RETRY;
3428

3429 3430
	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
		return r;
3431

3432
	spin_lock(&vcpu->kvm->mmu_lock);
3433
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3434
		goto out_unlock;
3435 3436
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
3437 3438
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3439
	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
3440 3441
	spin_unlock(&vcpu->kvm->mmu_lock);

3442
	return r;
3443 3444 3445 3446

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
3447
	return RET_PF_RETRY;
3448 3449
}

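/*
 * The fault path above follows the usual mmu_notifier protocol:
 * mmu_notifier_seq is sampled (with a read barrier) before the pfn is
 * resolved, and mmu_notifier_retry() re-checks it under mmu_lock, so a
 * concurrent invalidation of the hva range forces RET_PF_RETRY instead
 * of installing a stale translation.
 */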
3450 3451
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
3452
{
3453
	struct kvm_mmu_page *sp;
3454

3455
	if (!VALID_PAGE(*root_hpa))
A
Avi Kivity 已提交
3456
		return;
3457

3458 3459 3460 3461
	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
	--sp->root_count;
	if (!sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3462

3463 3464 3465
	*root_hpa = INVALID_PAGE;
}

3466 3467
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
3468 3469 3470 3471
{
	int i;
	LIST_HEAD(invalid_list);
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
3472
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3473

3474
	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3475

3476
	/* Before acquiring the MMU lock, see if we need to do any real work. */
3477 3478 3479 3480 3481 3482 3483 3484 3485
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}
3486 3487

	spin_lock(&vcpu->kvm->mmu_lock);
3488

3489 3490 3491 3492
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);
3493

3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506
	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
					   &invalid_list);
		} else {
			for (i = 0; i < 4; ++i)
				if (mmu->pae_root[i] != 0)
					mmu_free_root_page(vcpu->kvm,
							   &mmu->pae_root[i],
							   &invalid_list);
			mmu->root_hpa = INVALID_PAGE;
		}
3507
	}
3508

3509
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3510
	spin_unlock(&vcpu->kvm->mmu_lock);
3511
}
3512
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3513

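/*
 * kvm_mmu_free_roots() above takes a bitmask of roots:
 * KVM_MMU_ROOT_CURRENT selects the active root_hpa (or the four
 * pae_root entries), and KVM_MMU_ROOT_PREVIOUS(i) selects a cached
 * previous root.  Each selected root has its root_count dropped and is
 * queued for zapping once it is both unreferenced and marked invalid.
 */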
3514 3515 3516 3517 3518
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3519
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3520 3521 3522 3523 3524 3525
		ret = 1;
	}

	return ret;
}

3526 3527 3528
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
3529
	unsigned i;
3530

3531
	if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
3532
		spin_lock(&vcpu->kvm->mmu_lock);
3533 3534
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
3535
			return -ENOSPC;
3536
		}
3537 3538
		sp = kvm_mmu_get_page(vcpu, 0, 0,
				vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
3539 3540 3541 3542 3543 3544 3545
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

3546
			MMU_WARN_ON(VALID_PAGE(root));
3547
			spin_lock(&vcpu->kvm->mmu_lock);
3548 3549
			if (make_mmu_pages_available(vcpu) < 0) {
				spin_unlock(&vcpu->kvm->mmu_lock);
3550
				return -ENOSPC;
3551
			}
3552
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3553
					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
3554 3555 3556 3557 3558
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
3559
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3560 3561 3562 3563 3564 3565 3566
	} else
		BUG();

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3567
{
3568
	struct kvm_mmu_page *sp;
3569 3570 3571
	u64 pdptr, pm_mask;
	gfn_t root_gfn;
	int i;
3572

3573
	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
3574

3575 3576 3577 3578 3579 3580 3581
	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
3582
	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
3583
		hpa_t root = vcpu->arch.mmu.root_hpa;
3584

3585
		MMU_WARN_ON(VALID_PAGE(root));
3586

3587
		spin_lock(&vcpu->kvm->mmu_lock);
3588 3589
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
3590
			return -ENOSPC;
3591
		}
3592 3593
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
3594 3595
		root = __pa(sp->spt);
		++sp->root_count;
3596
		spin_unlock(&vcpu->kvm->mmu_lock);
3597
		vcpu->arch.mmu.root_hpa = root;
3598
		return 0;
3599
	}
3600

3601 3602
	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		MMU_WARN_ON(VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
				      0, ACC_ALL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);

		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu.lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */

			u64 *lm_root;

			lm_root = (void*)get_zeroed_page(GFP_KERNEL);
			if (lm_root == NULL)
				return 1;

			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;

			vcpu->arch.mmu.lm_root = lm_root;
		}

		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
	}

	return 0;
}

static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu.direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}

3675
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3676 3677 3678 3679
{
	int i;
	struct kvm_mmu_page *sp;

3680 3681 3682
	if (vcpu->arch.mmu.direct_map)
		return;

3683 3684
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
3685

3686
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3687

3688
	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
3689
		hpa_t root = vcpu->arch.mmu.root_hpa;
3690

3691
		sp = page_header(root);
3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709

		/*
		 * Even if another CPU was marking the SP as unsync-ed
		 * simultaneously, any guest page table changes are not
		 * guaranteed to be visible anyway until this VCPU issues a TLB
		 * flush strictly after those changes are made. We only need to
		 * ensure that the other CPU sets these flags before any actual
		 * changes to the page tables are made. The comments in
		 * mmu_need_write_protect() describe what could go wrong if this
		 * requirement isn't satisfied.
		 */
		if (!smp_load_acquire(&sp->unsync) &&
		    !smp_load_acquire(&sp->unsync_children))
			return;

		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3710
		mmu_sync_children(vcpu, sp);
3711

3712
		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3713
		spin_unlock(&vcpu->kvm->mmu_lock);
3714 3715
		return;
	}
3716 3717 3718 3719

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3720 3721 3722
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

3723
		if (root && VALID_PAGE(root)) {
3724 3725 3726 3727 3728 3729
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}

3730
	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3731
	spin_unlock(&vcpu->kvm->mmu_lock);
3732
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3734

3735
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3736
				  u32 access, struct x86_exception *exception)
{
3738 3739
	if (exception)
		exception->error_code = 0;
	return vaddr;
}

3743
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
3744 3745
					 u32 access,
					 struct x86_exception *exception)
3746
{
3747 3748
	if (exception)
		exception->error_code = 0;
3749
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3750 3751
}

3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770
static bool
__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
{
	int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;

	return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
		((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
}

static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
}

static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
{
	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
}

3771
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3772
{
3773 3774 3775 3776 3777 3778 3779
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

3780 3781 3782 3783 3784 3785
	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

3786 3787 3788
/* return true if reserved bit is detected on spte. */
static bool
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3789 3790
{
	struct kvm_shadow_walk_iterator iterator;
3791
	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
3792 3793
	int root, leaf;
	bool reserved = false;
3794

3795
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3796
		goto exit;
3797

3798
	walk_shadow_page_lockless_begin(vcpu);
3799

3800 3801
	for (shadow_walk_init(&iterator, vcpu, addr),
		 leaf = root = iterator.level;
3802 3803 3804 3805 3806
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf - 1] = spte;
3807
		leaf--;
3808

3809 3810
		if (!is_shadow_present_pte(spte))
			break;
3811 3812

		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
3813
						    iterator.level);
3814 3815
	}

3816 3817
	walk_shadow_page_lockless_end(vcpu);

3818 3819 3820
	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
3821
		while (root > leaf) {
3822 3823 3824 3825 3826 3827 3828 3829
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[root - 1], root);
			root--;
		}
	}
exit:
	*sptep = spte;
	return reserved;
3830 3831
}

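/*
 * Resolve a fault taken with the RSVD error bit set: if the walk hits a
 * still-current MMIO spte the access is emulated, a stale spte makes the
 * guest retry, and unexpected reserved bits are reported as an error.
 */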
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3833 3834
{
	u64 spte;
3835
	bool reserved;
3836

3837
	if (mmio_info_in_cache(vcpu, addr, direct))
3838
		return RET_PF_EMULATE;
3839

3840
	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3841
	if (WARN_ON(reserved))
3842
		return -EINVAL;
3843 3844 3845 3846 3847

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned access = get_mmio_spte_access(spte);

3848
		if (!check_mmio_spte(vcpu, spte))
3849
			return RET_PF_INVALID;
3850

3851 3852
		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
3855
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3856
		return RET_PF_EMULATE;
3857 3858 3859 3860 3861 3862
	}

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
3863
	return RET_PF_RETRY;
3864 3865
}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * The guest is writing a page that is write-tracked; this cannot
	 * be fixed up by the page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

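/*
 * Walk the shadow page table for @addr locklessly and reset the
 * write-flooding counter of every shadow page on the path.
 */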
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3904
				u32 error_code, bool prefault)
{
3906
	gfn_t gfn = gva >> PAGE_SHIFT;
3907
	int r;

3909
	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3910

3911
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3912
		return RET_PF_EMULATE;
3913

3914 3915 3916
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
3917

3918
	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));


3921
	return nonpaging_map(vcpu, gva & PAGE_MASK,
3922
			     error_code, gfn, prefault);
}

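/*
 * Queue an asynchronous page fault for @gva/@gfn, recording the token, cr3
 * and mapping mode needed to complete it once the page is resident.
 */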
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu.direct_map;
	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);

	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

3937
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3938
{
3939
	if (unlikely(!lapic_in_kernel(vcpu) ||
3940 3941
		     kvm_event_needs_reinjection(vcpu) ||
		     vcpu->arch.exception.pending))
3942 3943
		return false;

3944
	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
3945 3946
		return false;

3947 3948 3949
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

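/*
 * Translate @gfn to a host pfn, preferring to queue an async page fault
 * rather than block the vCPU when the backing page must be faulted in.
 */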
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async;

	/*
	 * Don't expose private memslots to L2.
	 */
	if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		*pfn = KVM_PFN_NOSLOT;
		return false;
	}

3964
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3965 3966
	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
3967 3968 3969
	if (!async)
		return false; /* *pfn has correct page already */

3970
	if (!prefault && kvm_can_do_async_pf(vcpu)) {
3971
		trace_kvm_try_async_get_page(gva, gfn);
3972 3973 3974 3975 3976 3977 3978 3979
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(gva, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
			return true;
	}

3980
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
3981 3982 3983
	return false;
}

3984
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3985
				u64 fault_address, char *insn, int insn_len)
3986 3987 3988
{
	int r = 1;

	vcpu->arch.l1tf_flush_l1d = true;
3990 3991 3992 3993
	switch (vcpu->arch.apf.host_apf_reason) {
	default:
		trace_kvm_page_fault(fault_address, error_code);

3994
		if (kvm_event_needs_reinjection(vcpu))
3995 3996 3997 3998 3999 4000 4001
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
4002
		kvm_async_pf_task_wait(fault_address, 0);
4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

4016 4017 4018 4019 4020 4021 4022 4023 4024 4025
static bool
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
{
	int page_num = KVM_PAGES_PER_HPAGE(level);

	gfn &= ~(page_num - 1);

	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
4027
			  bool prefault)
4028
{
	kvm_pfn_t pfn;
4030
	int r;
4031
	int level;
4032
	bool force_pt_level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
4034
	unsigned long mmu_seq;
4035 4036
	int write = error_code & PFERR_WRITE_MASK;
	bool map_writable;
4037

4038
	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
4039

4040
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
4041
		return RET_PF_EMULATE;
4042

4043 4044 4045 4046
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

4047 4048 4049
	force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
							   PT_DIRECTORY_LEVEL);
	level = mapping_level(vcpu, gfn, &force_pt_level);
4050
	if (likely(!force_pt_level)) {
4051 4052 4053
		if (level > PT_DIRECTORY_LEVEL &&
		    !check_hugepage_cache_consistency(vcpu, gfn, level))
			level = PT_DIRECTORY_LEVEL;
4054
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
4055
	}
4056

4057
	if (fast_page_fault(vcpu, gpa, level, error_code))
4058
		return RET_PF_RETRY;
4059

4060
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
4061
	smp_rmb();
4062

4063
	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
4064
		return RET_PF_RETRY;
4065

4066 4067 4068
	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
		return r;

4069
	spin_lock(&vcpu->kvm->mmu_lock);
4070
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
4071
		goto out_unlock;
4072 4073
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
4074 4075
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
4076
	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
4077 4078 4079
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;
4080 4081 4082 4083

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
4084
	return RET_PF_RETRY;
4085 4086
}

4087 4088
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
4092
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
4094
	context->update_pte = nonpaging_update_pte;
4095
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
4097
	context->direct_map = true;
4098
	context->nx = false;
}

4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132
/*
 * Find out if a previously cached root matching the new CR3/role is available.
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
	struct kvm_mmu *mmu = &vcpu->arch.mmu;

	root.cr3 = mmu->get_cr3(vcpu);
	root.hpa = mmu->root_hpa;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

		if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
		    page_header(root.hpa) != NULL &&
		    new_role.word == page_header(root.hpa)->role.word)
			break;
	}

	mmu->root_hpa = root.hpa;

	return i < KVM_MMU_NUM_PREV_ROOTS;
}

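/*
 * Try to switch to @new_cr3 by reusing a cached previous root instead of
 * tearing down the current one; only attempted when both the shadow and the
 * guest use 4-level (or higher) paging.
 */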
static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
			    union kvm_mmu_page_role new_role,
			    bool skip_tlb_flush)
{
	struct kvm_mmu *mmu = &vcpu->arch.mmu;

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
	    mmu->root_level >= PT64_ROOT_4LEVEL) {
		if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
			return false;

4149
		if (cached_root_available(vcpu, new_cr3, new_role)) {
4150 4151 4152 4153 4154 4155 4156 4157
			/*
			 * It is possible that the cached previous root page is
			 * obsolete because of a change in the MMU
			 * generation number. However, that is accompanied by
			 * KVM_REQ_MMU_RELOAD, which will free the root that we
			 * have set here and allocate a new one.
			 */

4158
			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
4159 4160
			if (!skip_tlb_flush) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4161
				kvm_x86_ops->tlb_flush(vcpu, true);
4162 4163 4164 4165 4166 4167 4168 4169 4170 4171
			}

			/*
			 * The last MMIO access's GVA and GPA are cached in the
			 * VCPU. When switching to a new CR3, that GVA->GPA
			 * mapping may no longer be valid. So clear any cached
			 * MMIO info even when we don't need to sync the shadow
			 * page tables.
			 */
			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4172

4173 4174 4175 4176 4177 4178 4179 4180
			__clear_sp_write_flooding_count(
				page_header(mmu->root_hpa));

			return true;
		}
	}

	return false;
}

4183
static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4184 4185
			      union kvm_mmu_page_role new_role,
			      bool skip_tlb_flush)
{
4187
	if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
4188
		kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT);
}

4191
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
4192
{
4193 4194
	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
			  skip_tlb_flush);
4195
}
4196
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
4197

4198 4199
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
4200
	return kvm_read_cr3(vcpu);
4201 4202
}

4203 4204
static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
{
4206
	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

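/*
 * While syncing, drop a cached MMIO spte whose gfn no longer matches the
 * guest pte; a still-matching MMIO spte is simply re-marked as current.
 */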
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned access, int *nr_present)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

4226 4227
static inline bool is_last_gpte(struct kvm_mmu *mmu,
				unsigned level, unsigned gpte)
{
4229 4230 4231 4232 4233 4234 4235
	/*
	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
	 * If it is clear, there are no large pages at this level, so clear
	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - mmu->last_nonleaf_level;

4236 4237 4238 4239 4240 4241 4242
	/*
	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;

4243
	return gpte & PT_PAGE_SIZE_MASK;
}

4246 4247 4248 4249 4250
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

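/*
 * Build the reserved-bit masks for one paging mode: for each level, the bits
 * that must be zero in a present leaf or non-leaf entry.
 */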
static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			struct rsvd_bits_validate *rsvd_check,
			int maxphyaddr, int level, bool nx, bool gbpages,
4263
			bool pse, bool amd)
4264 4265
{
	u64 exb_bit_rsvd = 0;
4266
	u64 gbpages_bit_rsvd = 0;
4267
	u64 nonleaf_bit8_rsvd = 0;
4268

4269
	rsvd_check->bad_mt_xwr = 0;
4270

4271
	if (!nx)
4272
		exb_bit_rsvd = rsvd_bits(63, 63);
4273
	if (!gbpages)
4274
		gbpages_bit_rsvd = rsvd_bits(7, 7);
4275 4276 4277 4278 4279

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
4280
	if (amd)
4281 4282
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

4283
	switch (level) {
4284 4285
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
4286 4287 4288 4289
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4290

4291
		if (!pse) {
4292
			rsvd_check->rsvd_bits_mask[1][1] = 0;
4293 4294 4295
			break;
		}

4296 4297
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
4298
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4299 4300
		else
			/* 32 bits PSE 4MB page */
4301
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4302 4303
		break;
	case PT32E_ROOT_LEVEL:
4304
		rsvd_check->rsvd_bits_mask[0][2] =
4305
			rsvd_bits(maxphyaddr, 63) |
4306
			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
4307
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4308
			rsvd_bits(maxphyaddr, 62);	/* PDE */
4309
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4310
			rsvd_bits(maxphyaddr, 62); 	/* PTE */
4311
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4312 4313
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
4314 4315
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4316
		break;
4317 4318 4319 4320 4321 4322
	case PT64_ROOT_5LEVEL:
		rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
4323
	case PT64_ROOT_4LEVEL:
4324 4325
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4326
			rsvd_bits(maxphyaddr, 51);
4327 4328
		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
4329
			rsvd_bits(maxphyaddr, 51);
4330 4331 4332 4333 4334 4335 4336
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
4337
			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
4338
			rsvd_bits(13, 29);
4339
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4340 4341
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
4342 4343
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4344 4345 4346 4347
		break;
	}
}

4348 4349 4350 4351 4352
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
				cpuid_maxphyaddr(vcpu), context->root_level,
4353 4354
				context->nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4355
				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
4356 4357
}

4358 4359 4360
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    int maxphyaddr, bool execonly)
4361
{
4362
	u64 bad_mt_xwr;
4363

4364 4365
	rsvd_check->rsvd_bits_mask[0][4] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4366
	rsvd_check->rsvd_bits_mask[0][3] =
4367
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4368
	rsvd_check->rsvd_bits_mask[0][2] =
4369
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4370
	rsvd_check->rsvd_bits_mask[0][1] =
4371
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4372
	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
4373 4374

	/* large page */
4375
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4376 4377
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] =
4378
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
4379
	rsvd_check->rsvd_bits_mask[1][1] =
4380
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
4381
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4382

4383 4384 4385 4386 4387 4388 4389 4390
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4391
	}
4392
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4393 4394
}

4395 4396 4397 4398 4399 4400 4401
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    cpuid_maxphyaddr(vcpu), execonly);
}

/*
 * The page table on the host is the shadow page table for the page
 * table in the guest or an AMD nested guest; its MMU features follow
 * the guest's features exactly.
 */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
4411 4412
	struct rsvd_bits_validate *shadow_zero_check;
	int i;
4413

4414 4415 4416 4417
	/*
	 * Passing "true" to the last argument is okay; it adds a check
	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
	 */
4418 4419
	shadow_zero_check = &context->shadow_zero_check;
	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4420
				boot_cpu_data.x86_phys_bits,
4421
				context->shadow_root_level, uses_nx,
4422 4423
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu), true);
4424 4425 4426 4427 4428 4429 4430 4431 4432

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

4433 4434 4435
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

4436 4437 4438 4439 4440 4441
static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

/*
 * The direct page table on the host uses as many MMU features as
 * possible; however, KVM currently does not do execution-protection.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

4455
	if (boot_cpu_is_amd())
4456
		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4457 4458
					boot_cpu_data.x86_phys_bits,
					context->shadow_root_level, false,
4459 4460
					boot_cpu_has(X86_FEATURE_GBPAGES),
					true, true);
4461
	else
4462
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4463 4464 4465
					    boot_cpu_data.x86_phys_bits,
					    false);

4466 4467 4468 4469 4470 4471 4472
	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}

/*
 * Same as reset_shadow_zero_bits_mask(), except this is the shadow page
 * table for an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
				    boot_cpu_data.x86_phys_bits, execonly);
}

#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))


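/*
 * Precompute, for every page-fault error-code combination, which U/W/X
 * settings in the paging-structure entries would make the access fault, so
 * permission_fault() can answer each check with a table lookup.
 */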
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu, bool ept)
{
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
	bool cr0_wp = is_write_protection(vcpu);
4509 4510

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4511 4512
		unsigned pfec = byte << 1;

		/*
4514 4515
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
		 */
4517

		/* Faults from writes to non-writable pages */
		u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
		/* Faults from user mode accesses to supervisor pages */
		u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
		/* Faults from fetches of non-executable pages*/
		u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
			if (!mmu->nx)
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP: kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * a SMAP violation if all of the following
			 * conditions are true:
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4563
		}
4564 4565

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4566 4567 4568
	}
}

/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				bool ept)
{
	unsigned bit;
	bool wp;

	if (ept) {
		mmu->pkru_mask = 0;
		return;
	}

	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_write_protection(vcpu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

4644
static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
4646 4647 4648 4649 4650
	unsigned root_level = mmu->root_level;

	mmu->last_nonleaf_level = root_level;
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
}

4653 4654 4655
static void paging64_init_context_common(struct kvm_vcpu *vcpu,
					 struct kvm_mmu *context,
					 int level)
{
4657
	context->nx = is_nx(vcpu);
4658
	context->root_level = level;
4659

4660
	reset_rsvds_bits_mask(vcpu, context);
4661
	update_permission_bitmask(vcpu, context, false);
4662
	update_pkru_bitmask(vcpu, context, false);
4663
	update_last_nonleaf_level(vcpu, context);

4665
	MMU_WARN_ON(!is_pae(vcpu));
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
4668
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
4670
	context->update_pte = paging64_update_pte;
4671
	context->shadow_root_level = level;
4672
	context->direct_map = false;
}

4675 4676
static void paging64_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
4677
{
4678 4679 4680 4681
	int root_level = is_la57_mode(vcpu) ?
			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;

	paging64_init_context_common(vcpu, context, root_level);
4682 4683
}

4684 4685
static void paging32_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
4687
	context->nx = false;
4688
	context->root_level = PT32_ROOT_LEVEL;
4689

4690
	reset_rsvds_bits_mask(vcpu, context);
4691
	update_permission_bitmask(vcpu, context, false);
4692
	update_pkru_bitmask(vcpu, context, false);
4693
	update_last_nonleaf_level(vcpu, context);

	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
4697
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
4699
	context->update_pte = paging32_update_pte;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
4701
	context->direct_map = false;
}

4704 4705
static void paging32E_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
4707
	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}

4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724
static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = {0};

	role.guest_mode = is_guest_mode(vcpu);
	role.smm = is_smm(vcpu);
	role.ad_disabled = (shadow_accessed_mask == 0);
	role.level = kvm_x86_ops->get_tdp_level(vcpu);
	role.direct = true;
	role.access = ACC_ALL;

	return role;
}

4725
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4726
{
4727
	struct kvm_mmu *context = &vcpu->arch.mmu;
4728

4729 4730
	context->base_role.word = mmu_base_role_mask.word &
				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
4731
	context->page_fault = tdp_page_fault;
4732
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
4734
	context->update_pte = nonpaging_update_pte;
4735
	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
4736
	context->direct_map = true;
4737
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
4738
	context->get_cr3 = get_cr3;
4739
	context->get_pdptr = kvm_pdptr_read;
4740
	context->inject_page_fault = kvm_inject_page_fault;
4741 4742

	if (!is_paging(vcpu)) {
4743
		context->nx = false;
4744 4745 4746
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
4747
		context->nx = is_nx(vcpu);
4748 4749
		context->root_level = is_la57_mode(vcpu) ?
				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4750 4751
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4752
	} else if (is_pae(vcpu)) {
4753
		context->nx = is_nx(vcpu);
4754
		context->root_level = PT32E_ROOT_LEVEL;
4755 4756
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4757
	} else {
4758
		context->nx = false;
4759
		context->root_level = PT32_ROOT_LEVEL;
4760 4761
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
4762 4763
	}

4764
	update_permission_bitmask(vcpu, context, false);
4765
	update_pkru_bitmask(vcpu, context, false);
4766
	update_last_nonleaf_level(vcpu, context);
4767
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4768 4769
}

4770 4771
static union kvm_mmu_page_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
{
4773
	union kvm_mmu_page_role role = {0};
4774
	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4775
	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4776

4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798
	role.nxe = is_nx(vcpu);
	role.cr4_pae = !!is_pae(vcpu);
	role.cr0_wp  = is_write_protection(vcpu);
	role.smep_andnot_wp = smep && !is_write_protection(vcpu);
	role.smap_andnot_wp = smap && !is_write_protection(vcpu);
	role.guest_mode = is_guest_mode(vcpu);
	role.smm = is_smm(vcpu);
	role.direct = !is_paging(vcpu);
	role.access = ACC_ALL;

	if (!is_long_mode(vcpu))
		role.level = PT32E_ROOT_LEVEL;
	else if (is_la57_mode(vcpu))
		role.level = PT64_ROOT_5LEVEL;
	else
		role.level = PT64_ROOT_4LEVEL;

	return role;
}

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
4799
	struct kvm_mmu *context = &vcpu->arch.mmu;

	if (!is_paging(vcpu))
4802
		nonpaging_init_context(vcpu, context);
	else if (is_long_mode(vcpu))
4804
		paging64_init_context(vcpu, context);
	else if (is_pae(vcpu))
4806
		paging32E_init_context(vcpu, context);
	else
4808
		paging32_init_context(vcpu, context);
4809

4810 4811
	context->base_role.word = mmu_base_role_mask.word &
				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
4812
	reset_shadow_zero_bits_mask(vcpu, context);
4813 4814 4815
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829
static union kvm_mmu_page_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;

	role.level = PT64_ROOT_4LEVEL;
	role.direct = false;
	role.ad_disabled = !accessed_dirty;
	role.guest_mode = true;
	role.access = ACC_ALL;

	return role;
}

4830
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4831
			     bool accessed_dirty, gpa_t new_eptp)
{
4833
	struct kvm_mmu *context = &vcpu->arch.mmu;
4834 4835
	union kvm_mmu_page_role root_page_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
4836

4837
	__kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
4838
	context->shadow_root_level = PT64_ROOT_4LEVEL;

	context->nx = true;
4841
	context->ept_ad = accessed_dirty;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->update_pte = ept_update_pte;
4847
	context->root_level = PT64_ROOT_4LEVEL;
	context->direct_map = false;
4849
	context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
	update_permission_bitmask(vcpu, context, true);
4851
	update_pkru_bitmask(vcpu, context, true);
4852
	update_last_nonleaf_level(vcpu, context);
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4854
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

4858
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4859
{
4860 4861 4862 4863 4864 4865 4866
	struct kvm_mmu *context = &vcpu->arch.mmu;

	kvm_init_shadow_mmu(vcpu);
	context->set_cr3           = kvm_x86_ops->set_cr3;
	context->get_cr3           = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

4869
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4870 4871 4872 4873
{
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	g_context->get_cr3           = get_cr3;
4874
	g_context->get_pdptr         = kvm_pdptr_read;
4875 4876 4877
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
4878 4879 4880 4881 4882 4883
	 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4884 4885
	 */
	if (!is_paging(vcpu)) {
4886
		g_context->nx = false;
4887 4888 4889
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
4890
		g_context->nx = is_nx(vcpu);
4891 4892
		g_context->root_level = is_la57_mode(vcpu) ?
					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4893
		reset_rsvds_bits_mask(vcpu, g_context);
4894 4895
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
4896
		g_context->nx = is_nx(vcpu);
4897
		g_context->root_level = PT32E_ROOT_LEVEL;
4898
		reset_rsvds_bits_mask(vcpu, g_context);
4899 4900
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
4901
		g_context->nx = false;
4902
		g_context->root_level = PT32_ROOT_LEVEL;
4903
		reset_rsvds_bits_mask(vcpu, g_context);
4904 4905 4906
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

4907
	update_permission_bitmask(vcpu, g_context, false);
4908
	update_pkru_bitmask(vcpu, g_context, false);
4909
	update_last_nonleaf_level(vcpu, g_context);
4910 4911
}

4912
void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
4913
{
4914
	if (reset_roots) {
4915 4916
		uint i;

4917
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
4918 4919 4920

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
4921 4922
	}

4923
	if (mmu_is_nested(vcpu))
4924
		init_kvm_nested_mmu(vcpu);
4925
	else if (tdp_enabled)
4926
		init_kvm_tdp_mmu(vcpu);
4927
	else
4928
		init_kvm_softmmu(vcpu);
4929
}
4930
EXPORT_SYMBOL_GPL(kvm_init_mmu);
4931

4932 4933 4934 4935 4936 4937 4938 4939
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
	if (tdp_enabled)
		return kvm_calc_tdp_mmu_root_page_role(vcpu);
	else
		return kvm_calc_shadow_mmu_root_page_role(vcpu);
}
4940

4941
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
4943
	kvm_mmu_unload(vcpu);
4944
	kvm_init_mmu(vcpu, true);
}
4946
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
4950 4951
	int r;

4952
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
4955
	r = mmu_alloc_roots(vcpu);
4956
	kvm_mmu_sync_roots(vcpu);
4957 4958
	if (r)
		goto out;
4959
	kvm_mmu_load_cr3(vcpu);
4960
	kvm_x86_ops->tlb_flush(vcpu, true);
4961 4962
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
4968
	kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
4969
	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
}
4971
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

4973
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4974 4975
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
}

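/*
 * A remote TLB flush is only needed when a previously present spte is
 * zapped, remapped to another frame, or loses permission bits.
 */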
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
4996 4997 4998
	return (old & ~new & PT64_PERM_MASK) != 0;
}

4999 5000
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    const u8 *new, int *bytes)
5001
{
5002 5003
	u64 gentry;
	int r;
5004 5005 5006

	/*
	 * Assume the pte write is to a page table of the same type as the
	 * current vcpu paging mode, since sptes are only updated when the
	 * modes match.
	 */
5010
	if (is_pae(vcpu) && *bytes == 4) {
5011
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5012 5013
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
5014
		r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
5015 5016
		if (r)
			gentry = 0;
5017 5018 5019
		new = (const u8 *)&gentry;
	}

5020
	switch (*bytes) {
5021 5022 5023 5024 5025 5026 5027 5028 5029
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
5030 5031
	}

5032 5033 5034 5035 5036 5037 5038
	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
5039
static bool detect_write_flooding(struct kvm_mmu_page *sp)
5040
{
	/*
	 * Skip write-flooding detection for level-1 sps: they can become
	 * unsync instead, in which case the guest page is not write-protected.
	 */
	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
5046
		return false;
5047

5048 5049
	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.cr4_pae ? 8 : 4;

	/*
	 * Sometimes the OS only writes the last byte to update a status
	 * bit; in Linux, for example, clear_bit() uses an andb instruction.
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

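/*
 * Locate the shadow pte(s) touched by a guest pte write; a 32-bit guest pde
 * spans two shadow pdes, so up to two sptes are returned via *nspte.
 */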
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.cr4_pae) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5112 5113
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
5114 5115 5116 5117 5118 5119
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
5120
	bool remote_flush, local_flush;
5121 5122 5123 5124 5125

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
5126
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5127 5128
		return;

5129
	remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

	/*
	 * No need to care whether allocation memory is successful
	 * or not since pte prefetch is skiped if it does not have
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	++vcpu->kvm->stat.mmu_pte_write;
5144
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5145

5146
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5147
		if (detect_write_misaligned(sp, gpa, bytes) ||
5148
		      detect_write_flooding(sp)) {
5149
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
5151 5152
			continue;
		}
5153 5154 5155 5156 5157

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

5158
		local_flush = true;
5159
		while (npte--) {
5160
			entry = *spte;
5161
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
5162 5163
			if (gentry &&
			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
5164
			      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
5165
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (need_remote_flush(entry, *spte))
5167
				remote_flush = true;
5168
			++spte;
5169 5170
		}
	}
5171
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5172
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5173
	spin_unlock(&vcpu->kvm->mmu_lock);
5174 5175
}

5176 5177
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
5178 5179
	gpa_t gpa;
	int r;
5180

5181
	if (vcpu->arch.mmu.direct_map)
5182 5183
		return 0;

5184
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
5185 5186

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
5187

5188
	return r;
5189
}
5190
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
5191

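/*
 * Reclaim shadow pages by zapping the oldest ones until the per-VM budget
 * again allows new allocations; returns -ENOSPC if no pages could be made
 * available.
 */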
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;

		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

5212
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
5213
		       void *insn, int insn_len)
5214
{
5215
	int r, emulation_type = 0;
5216
	enum emulation_result er;
5217
	bool direct = vcpu->arch.mmu.direct_map;
5218

5219 5220 5221 5222 5223
	/* With shadow page tables, fault_address contains a GVA or nGPA.  */
	if (vcpu->arch.mmu.direct_map) {
		vcpu->arch.gpa_available = true;
		vcpu->arch.gpa_val = cr2;
	}
5224

5225
	r = RET_PF_INVALID;
5226 5227
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2, direct);
5228
		if (r == RET_PF_EMULATE)
5229 5230
			goto emulate;
	}
5231

5232 5233 5234 5235 5236 5237 5238 5239
	if (r == RET_PF_INVALID) {
		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
					      false);
		WARN_ON(r == RET_PF_INVALID);
	}

	if (r == RET_PF_RETRY)
		return 1;
5240
	if (r < 0)
5241
		return r;
5242

5243 5244 5245 5246 5247 5248 5249
	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
5250
	if (vcpu->arch.mmu.direct_map &&
5251
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5252 5253 5254 5255
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
		return 1;
	}

5256 5257 5258 5259 5260 5261
	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
5262 5263 5264 5265
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5266
	 */
5267
	if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
5268
		emulation_type = EMULTYPE_ALLOW_RETRY;
5269
emulate:
5270 5271 5272 5273 5274 5275 5276 5277 5278 5279
	/*
	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
	 * This can happen if a guest gets a page-fault on data access but the HW
	 * table walker is not able to read the instruction page (e.g instruction
	 * page is not present in memory). In those cases we simply restart the
	 * guest.
	 */
	if (unlikely(insn && !insn_len))
		return 1;

5280
	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
5281 5282 5283 5284

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_USER_EXIT:
5286
		++vcpu->stat.mmio_exits;
5287
		/* fall through */
5288
	case EMULATE_FAIL:
5289
		return 0;
5290 5291 5292 5293 5294 5295
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
5298
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
5299
	int i;
5300

5301 5302 5303 5304
	/* INVLPG on a * non-canonical address is a NOP according to the SDM.  */
	if (is_noncanonical_address(gva, vcpu))
		return;

5305
	mmu->invlpg(vcpu, gva, mmu->root_hpa);
5306 5307 5308 5309

	/*
	 * INVLPG is required to invalidate any global mappings for the VA,
	 * irrespective of PCID. Since it would take us roughly similar amount
5310 5311 5312
	 * of work to determine whether any of the prev_root mappings of the VA
	 * is marked global, or to just sync it blindly, so we might as well
	 * just always sync it.
5313
	 *
5314 5315 5316
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
5317
	 */
5318 5319 5320
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (VALID_PAGE(mmu->prev_roots[i].hpa))
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5321

5322
	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
M
Marcelo Tosatti 已提交
5323 5324 5325 5326
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

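/*
 * Handle a single-address INVPCID invalidation (presumably forwarded from
 * the vendor-specific INVPCID intercept): only roots whose PCID matches
 * are synced, and the TLB is flushed only if a matching root was found.
 */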
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		kvm_x86_ops->tlb_flush_gva(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

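/*
 * pae_root backs the special root used for shadowing 32-bit (PAE) guests
 * and must sit below 4GB (see the DMA32 allocation below); lm_root is
 * allocated on demand elsewhere and is only freed here.
 */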
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
	free_page((unsigned long)vcpu->arch.mmu.lm_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	if (tdp_enabled)
		return 0;

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}

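/*
 * Per-vcpu MMU construction: install the default (non-nested) walker,
 * invalidate all roots and allocate the PAE root page when TDP is not
 * in use.
 */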
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	uint i;

	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	return alloc_mmu_pages(vcpu);
}

void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	kvm_init_mmu(vcpu, true);
}

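/*
 * Page-track notifier callbacks: track_write keeps write-protected guest
 * page tables in sync via kvm_mmu_pte_write(), while track_flush_slot
 * zaps all shadow pages when a memslot is flushed or removed.
 */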
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);
}

/* The return value indicates if a TLB flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

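/*
 * Convenience wrappers around slot_handle_level(): walk all levels, only
 * the huge-page levels, or only the 4K (leaf) level of a memslot.
 */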
static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}

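/*
 * Zap every rmap entry for the given gfn range, in every address space
 * and every memslot that overlaps it, under mmu_lock.
 */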
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			gfn_t start, end;

			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;

			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
						start, end - 1, true);
		}
	}

	spin_unlock(&kvm->mmu_lock);
}

static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head)
{
	return __rmap_write_protect(kvm, rmap_head, false);
}

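/*
 * Write-protect all sptes, at every page-table level, that map the given
 * memslot.  The TLB flush happens after mmu_lock is dropped; the comments
 * below explain why that is safe.
 */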
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
				      false);
	spin_unlock(&kvm->mmu_lock);

	/*
	 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log(),
	 * which do the TLB flush out of mmu-lock, should be serialized by
	 * kvm->slots_lock; otherwise the TLB flush could be missed.
	 */
	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * We can flush all the TLBs out of the mmu lock without TLB
	 * corruption since we just change the spte from writable to
	 * readonly, so we only need to care about the case of changing
	 * the spte from present to present (changing the spte from present
	 * to nonpresent will flush all the TLBs immediately).  In other
	 * words, the only case we care about is mmu_spte_update(), where we
	 * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
	 * instead of PT_WRITABLE_MASK, which means it does not depend
	 * on PT_WRITABLE_MASK anymore.
	 */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = page_header(__pa(sptep));
		pfn = spte_to_pfn(*sptep);

		/*
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table uses a 4K mapping if
		 * the indirect sp has level = 1.
		 */
		if (sp->role.direct &&
			!kvm_is_reserved_pfn(pfn) &&
			PageTransCompoundMap(pfn_to_page(pfn))) {
			drop_spte(kvm, sptep);
			need_tlb_flush = 1;
			goto restart;
		}
	}

	return need_tlb_flush;
}

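/*
 * Drop 4K sptes whose pfn is now part of a compound page so that the
 * next fault can map the range with a huge page again, e.g. after dirty
 * logging has been disabled for the slot.
 */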
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
	spin_lock(&kvm->mmu_lock);
	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
			 kvm_mmu_zap_collapsible_spte, true);
	spin_unlock(&kvm->mmu_lock);
}

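/*
 * Clear the dirty bit on every leaf (4K) spte that maps the given
 * memslot; the comment below explains why the TLB flush can be done
 * outside of mmu_lock.
 */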
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
					false);
	spin_unlock(&kvm->mmu_lock);

	/* see kvm_mmu_slot_remove_write_access */
	lockdep_assert_held(&kvm->slots_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/* see kvm_mmu_slot_leaf_clear_dirty */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

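/*
 * Zap obsolete shadow pages in batches of BATCH_ZAP_PAGES, allowing the
 * lock to be broken (cond_resched_lock) after a batch so that a long
 * active_mmu_pages list does not block vcpus for too long.
 */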
#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		int ret;

		/*
		 * No obsolete pages exist before newly created pages, since
		 * active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Since we are walking the list in reverse and invalid pages
		 * are moved to the head, skipping invalid pages helps us
		 * avoid walking the list forever.
		 */
		if (sp->role.invalid)
			continue;

		/*
		 * No need to flush the TLB since we only zap sps with an
		 * invalid generation number.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		      cond_resched_lock(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		ret = kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages);
		batch += ret;

		if (ret)
			goto restart;
	}

	/*
	 * The TLB should be flushed before freeing the page tables since
	 * lockless walkers may still use the pages.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast-invalidate all shadow pages and use the lock-break technique
 * to zap obsolete pages.
 *
 * It's required when a memslot is being deleted or the VM is being
 * destroyed; in these cases, we should ensure that the KVM MMU does
 * not use any resource of the being-deleted slot, or of any slot,
 * after calling this function.
 */
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
	spin_lock(&kvm->mmu_lock);
	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
	kvm->arch.mmu_valid_gen++;

	/*
	 * Notify all vcpus to reload their shadow page tables
	 * and flush their TLBs.  All vcpus will then switch to the new
	 * shadow page table with the new mmu_valid_gen.
	 *
	 * Note: we should do this under the protection of
	 * mmu-lock; otherwise a vcpu could purge a shadow page
	 * but miss the TLB flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);
	spin_unlock(&kvm->mmu_lock);
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
{
	/*
	 * The very rare case: if the MMIO generation number has wrapped
	 * around, zap all shadow pages.
	 */
	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_invalidate_zap_all_pages(kvm);
	}
}

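/*
 * Shrinker scan callback: free at most one shadow page (or a pending
 * batch of zapped obsolete pages) from the first suitable VM, then move
 * that VM to the tail of vm_list to spread the cost across VMs.
 */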
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * In practice we will not hit this condition since we do not
		 * try to shrink more than one VM and it is very unlikely to
		 * see !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		      !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			freed++;
		kvm_mmu_commit_zap_page(kvm, &invalid_list);

unlock:
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	spin_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

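/*
 * Module initialization: compute the shadow PTE masks, create the slab
 * caches for rmap descriptors and shadow page headers, and register the
 * MMU shrinker.
 */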
int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}