mmu.c 169.6 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
A
Avi Kivity 已提交
2 3 4 5 6 7 8 9 10
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
N
Nicolas Kaiser 已提交
11
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
12 13 14 15 16
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
A
Avi Kivity 已提交
17

18
#include "irq.h"
19
#include "mmu.h"
20
#include "x86.h"
A
Avi Kivity 已提交
21
#include "kvm_cache_regs.h"
22
#include "cpuid.h"
A
Avi Kivity 已提交
23

24
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
25 26 27 28
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
29 30
#include <linux/moduleparam.h>
#include <linux/export.h>
31
#include <linux/swap.h>
M
Marcelo Tosatti 已提交
32
#include <linux/hugetlb.h>
33
#include <linux/compiler.h>
34
#include <linux/srcu.h>
35
#include <linux/slab.h>
36
#include <linux/sched/signal.h>
37
#include <linux/uaccess.h>
38
#include <linux/hash.h>
39
#include <linux/kern_levels.h>
40
#include <linux/kthread.h>
A
Avi Kivity 已提交
41

A
Avi Kivity 已提交
42
#include <asm/page.h>
43
#include <asm/pat.h>
A
Avi Kivity 已提交
44
#include <asm/cmpxchg.h>
45
#include <asm/e820/api.h>
46
#include <asm/io.h>
47
#include <asm/vmx.h>
48
#include <asm/kvm_page_track.h>
49
#include "trace.h"
A
Avi Kivity 已提交
50

P
Paolo Bonzini 已提交
51 52 53
extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
54 55 56 57
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
58
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
59
#endif
P
Paolo Bonzini 已提交
60 61

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
62
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
P
Paolo Bonzini 已提交
63 64 65 66 67 68

static struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

69 70 71 72 73
static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

P
Paolo Bonzini 已提交
74 75
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
76 77 78
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
P
Paolo Bonzini 已提交
79

80 81 82 83 84 85 86
/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
87
bool tdp_enabled = false;
88

89 90 91 92
enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
93 94 95
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
96
};
97

98
#undef MMU_DEBUG
99 100

#ifdef MMU_DEBUG
101 102
static bool dbg = 0;
module_param(dbg, bool, 0644);
103 104 105

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
106
#define MMU_WARN_ON(x) WARN_ON(x)
107 108 109
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
110
#define MMU_WARN_ON(x) do { } while (0)
111
#endif
A
Avi Kivity 已提交
112

113 114
#define PTE_PREFETCH_NUM		8

115
#define PT_FIRST_AVAIL_BITS_SHIFT 10
116 117 118 119 120 121 122 123 124
#define PT64_SECOND_AVAIL_BITS_SHIFT 54

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs.
 */
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_AD_ENABLED_MASK (0ULL << 52)
#define SPTE_AD_DISABLED_MASK (1ULL << 52)
125
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
126
#define SPTE_MMIO_MASK (3ULL << 52)
A
Avi Kivity 已提交
127 128 129 130

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
M
Mike Day 已提交
131
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
A
Avi Kivity 已提交
132 133 134 135 136 137 138 139

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
M
Mike Day 已提交
140
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
A
Avi Kivity 已提交
141

142 143 144
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
145 146 147 148 149

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


150 151 152 153 154
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
155 156 157 158 159 160
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
161 162 163 164

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
165 166 167
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
168

169
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
170
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
A
Avi Kivity 已提交
171

172 173 174 175 176
#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

177 178 179 180
/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

181 182
#include <trace/events/kvm.h>

183 184
#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
185

186 187
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

188 189 190
/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

191 192 193 194 195 196 197 198 199 200 201 202 203 204
/*
 * Return values of handle_mmio_page_fault and mmu.page_fault:
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 *
 * For handle_mmio_page_fault only:
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE = 1,
	RET_PF_INVALID = 2,
};

205 206 207
struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
208 209
};

210 211 212 213
struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
214
	int level;
215 216 217
	unsigned index;
};

218 219
static const union kvm_mmu_page_role mmu_base_role_mask = {
	.cr0_wp = 1,
220
	.gpte_is_8_bytes = 1,
221 222 223 224 225 226 227 228
	.nxe = 1,
	.smep_andnot_wp = 1,
	.smap_andnot_wp = 1,
	.smm = 1,
	.guest_mode = 1,
	.ad_disabled = 1,
};

229 230 231 232 233 234 235
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
236 237 238 239
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

240 241 242 243 244 245
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

246
static struct kmem_cache *pte_list_desc_cache;
247
static struct kmem_cache *mmu_page_header_cache;
248
static struct percpu_counter kvm_total_used_mmu_pages;
249

S
Sheng Yang 已提交
250 251 252 253 254
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
255
static u64 __read_mostly shadow_mmio_mask;
256
static u64 __read_mostly shadow_mmio_value;
257
static u64 __read_mostly shadow_mmio_access_mask;
258
static u64 __read_mostly shadow_present_mask;
259
static u64 __read_mostly shadow_me_mask;
260

261
/*
262 263 264
 * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
265 266 267 268 269 270 271 272 273 274 275 276 277
 */
static u64 __read_mostly shadow_acc_track_mask;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

278 279 280 281 282 283 284 285 286 287 288
/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

289 290 291 292 293 294 295 296 297 298
/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

299 300 301 302 303
/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
static u8 __read_mostly shadow_phys_bits;
304

305
static void mmu_spte_set(u64 *sptep, u64 spte);
306
static bool is_executable_pte(u64 spte);
307 308
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
309

310 311 312
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341

static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops->tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops->tlb_remote_flush_with_range)
		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

342
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
343
{
344
	BUG_ON((u64)(unsigned)access_mask != access_mask);
345
	BUG_ON((mmio_mask & mmio_value) != mmio_value);
346
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
347
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
348
	shadow_mmio_access_mask = access_mask;
349 350 351
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

352 353 354 355 356
static bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value;
}

357 358 359 360 361
static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

362 363 364 365 366 367 368 369 370 371 372
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

373 374
static inline bool spte_ad_enabled(u64 spte)
{
375
	MMU_WARN_ON(is_mmio_spte(spte));
376 377 378 379 380 381 382
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
383 384
}

P
Paolo Bonzini 已提交
385 386 387 388 389
static bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

390 391
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
392
	MMU_WARN_ON(is_mmio_spte(spte));
393 394 395 396 397
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
398
	MMU_WARN_ON(is_mmio_spte(spte));
399 400 401
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

402 403
static inline bool is_access_track_spte(u64 spte)
{
404
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
405 406
}

407
/*
408 409
 * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
 * the memslots generation and is derived as follows:
410
 *
411 412
 * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
 * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
413
 *
414 415 416 417 418 419
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
420
 */
421
#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(18, 0)
422

423 424 425 426
#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		11
#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
427

428 429 430 431
#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		61
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)
432
static u64 generation_mmio_spte_mask(u64 gen)
433 434 435
{
	u64 mask;

436
	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
437

438 439
	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
440 441 442
	return mask;
}

443
static u64 get_mmio_spte_generation(u64 spte)
444
{
445
	u64 gen;
446 447 448

	spte &= ~shadow_mmio_mask;

449 450
	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
451 452 453
	return gen;
}

454
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
455
			   unsigned access)
456
{
457
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
458
	u64 mask = generation_mmio_spte_mask(gen);
459
	u64 gpa = gfn << PAGE_SHIFT;
460

461
	access &= shadow_mmio_access_mask;
462 463 464 465
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< shadow_nonpresent_or_rsvd_mask_len;
466

467
	trace_mark_mmio_spte(sptep, gfn, access, gen);
468
	mmu_spte_set(sptep, mask);
469 470 471 472
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
473
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
474 475 476 477 478

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
479 480 481 482
}

static unsigned get_mmio_spte_access(u64 spte)
{
483
	return spte & shadow_mmio_access_mask;
484 485
}

486
static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
D
Dan Williams 已提交
487
			  kvm_pfn_t pfn, unsigned access)
488 489
{
	if (unlikely(is_noslot_pfn(pfn))) {
490
		mark_mmio_spte(vcpu, sptep, gfn, access);
491 492 493 494 495
		return true;
	}

	return false;
}
496

497
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
498
{
499
	u64 kvm_gen, spte_gen, gen;
500

501 502 503
	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;
504

505
	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
506 507 508 509
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
510 511
}

512 513 514 515 516 517 518
/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
S
Sheng Yang 已提交
519
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
520
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
521
		u64 acc_track_mask, u64 me_mask)
S
Sheng Yang 已提交
522
{
523 524
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
525
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
526

S
Sheng Yang 已提交
527 528 529 530 531
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
532
	shadow_present_mask = p_mask;
533
	shadow_acc_track_mask = acc_track_mask;
534
	shadow_me_mask = me_mask;
S
Sheng Yang 已提交
535 536 537
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

538 539 540
static u8 kvm_get_shadow_phys_bits(void)
{
	/*
541 542 543 544
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
	 * the physical address bits reported by CPUID.
545
	 */
546 547
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;
548

549 550 551 552 553 554
	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
555 556
}

557
static void kvm_mmu_reset_all_pte_masks(void)
558
{
559 560
	u8 low_phys_bits;

561 562 563 564 565 566 567 568
	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;
569

570 571
	shadow_phys_bits = kvm_get_shadow_phys_bits();

572 573 574 575
	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
576 577 578 579 580
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID. Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
581
	 */
582 583 584
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_cache_bits;
	if (boot_cpu_data.x86_cache_bits <
585
	    52 - shadow_nonpresent_or_rsvd_mask_len) {
586
		shadow_nonpresent_or_rsvd_mask =
587
			rsvd_bits(boot_cpu_data.x86_cache_bits -
588
				  shadow_nonpresent_or_rsvd_mask_len,
589
				  boot_cpu_data.x86_cache_bits - 1);
590
		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
591 592 593
	} else
		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));

594 595
	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
596 597
}

A
Avi Kivity 已提交
598 599 600 601 602
static int is_cpuid_PSE36(void)
{
	return 1;
}

603 604
static int is_nx(struct kvm_vcpu *vcpu)
{
605
	return vcpu->arch.efer & EFER_NX;
606 607
}

608 609
static int is_shadow_present_pte(u64 pte)
{
610
	return (pte != 0) && !is_mmio_spte(pte);
611 612
}

M
Marcelo Tosatti 已提交
613 614 615 616 617
static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

618 619 620 621
static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
622
	if (is_large_pte(pte))
623 624 625 626
		return 1;
	return 0;
}

627 628 629 630 631
static bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

D
Dan Williams 已提交
632
static kvm_pfn_t spte_to_pfn(u64 pte)
633
{
634
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
635 636
}

637 638 639 640 641 642 643
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

644
#ifdef CONFIG_X86_64
A
Avi Kivity 已提交
645
static void __set_spte(u64 *sptep, u64 spte)
646
{
647
	WRITE_ONCE(*sptep, spte);
648 649
}

650
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
651
{
652
	WRITE_ONCE(*sptep, spte);
653 654 655 656 657 658
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}
659 660 661

static u64 __get_spte_lockless(u64 *sptep)
{
662
	return READ_ONCE(*sptep);
663
}
664
#else
665 666 667 668 669 670 671
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};
672

673 674 675 676 677 678 679 680 681 682 683 684
static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

685 686 687
static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;
688

689 690 691 692 693 694 695 696 697 698 699 700
	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, We should store
	 * the high bits firstly, then set present bit, so cpu can not
	 * fetch this spte while we are setting the spte.
	 */
	smp_wmb();

701
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
702 703
}

704 705 706 707 708 709 710
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

711
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
712 713 714 715 716 717 718 719

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * present bit firstly to avoid vcpu fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
720
	count_spte_clear(sptep, spte);
721 722 723 724 725 726 727 728 729 730 731
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
732 733
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
734
	count_spte_clear(sptep, spte);
735 736 737

	return orig.spte;
}
738 739 740

/*
 * The idea using the light way get the spte on x86_32 guest is from
741
 * gup_get_pte (mm/gup.c).
742 743 744 745 746 747 748 749 750 751 752 753 754 755
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running out of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
779 780
#endif

781
static bool spte_can_locklessly_be_made_writable(u64 spte)
782
{
783 784
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
785 786
}

787 788
static bool spte_has_volatile_bits(u64 spte)
{
789 790 791
	if (!is_shadow_present_pte(spte))
		return false;

792
	/*
793
	 * Always atomically update spte if it can be updated
794 795 796 797
	 * out of mmu-lock, it can ensure dirty bit is not lost,
	 * also, it can help us to get a stable is_writable_pte()
	 * to ensure tlb flush is not missed.
	 */
798 799
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
800 801
		return true;

802
	if (spte_ad_enabled(spte)) {
803 804 805 806
		if ((spte & shadow_accessed_mask) == 0 ||
	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}
807

808
	return false;
809 810
}

811
static bool is_accessed_spte(u64 spte)
812
{
813 814 815 816
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
817 818
}

819
static bool is_dirty_spte(u64 spte)
820
{
821 822 823
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
824 825
}

826 827 828 829 830 831 832 833 834 835 836 837
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

838 839 840
/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
841
 */
842
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
843
{
844
	u64 old_spte = *sptep;
845

846
	WARN_ON(!is_shadow_present_pte(new_spte));
847

848 849
	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
850
		return old_spte;
851
	}
852

853
	if (!spte_has_volatile_bits(old_spte))
854
		__update_clear_spte_fast(sptep, new_spte);
855
	else
856
		old_spte = __update_clear_spte_slow(sptep, new_spte);
857

858 859
	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881
	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

882 883
	/*
	 * For the spte updated out of mmu-lock is safe, since
884
	 * we always atomically update it, see the comments in
885 886
	 * spte_has_volatile_bits().
	 */
887
	if (spte_can_locklessly_be_made_writable(old_spte) &&
888
	      !is_writable_pte(new_spte))
889
		flush = true;
890

891
	/*
892
	 * Flush TLB when accessed/dirty states are changed in the page tables,
893 894 895
	 * to guarantee consistency between TLB and page tables.
	 */

896 897
	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
898
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
899 900 901 902
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
903
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
904
	}
905

906
	return flush;
907 908
}

909 910 911 912
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
913
 * Returns non-zero if the PTE was previously valid.
914 915 916
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
D
Dan Williams 已提交
917
	kvm_pfn_t pfn;
918 919 920
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
921
		__update_clear_spte_fast(sptep, 0ull);
922
	else
923
		old_spte = __update_clear_spte_slow(sptep, 0ull);
924

925
	if (!is_shadow_present_pte(old_spte))
926 927 928
		return 0;

	pfn = spte_to_pfn(old_spte);
929 930 931 932 933 934

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
935
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
936

937
	if (is_accessed_spte(old_spte))
938
		kvm_set_pfn_accessed(pfn);
939 940

	if (is_dirty_spte(old_spte))
941
		kvm_set_pfn_dirty(pfn);
942

943 944 945 946 947 948 949 950 951 952
	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
953
	__update_clear_spte_fast(sptep, 0ull);
954 955
}

956 957 958 959 960
static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

961 962
static u64 mark_spte_for_access_track(u64 spte)
{
963
	if (spte_ad_enabled(spte))
964 965
		return spte & ~shadow_accessed_mask;

966
	if (is_access_track_spte(spte))
967 968 969
		return spte;

	/*
970 971 972
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE. So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
			  shadow_acc_track_saved_bits_shift),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
		shadow_acc_track_saved_bits_shift;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

989 990 991 992 993 994 995
/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
			 & shadow_acc_track_saved_bits_mask;

996
	WARN_ON_ONCE(spte_ad_enabled(spte));
997 998 999 1000 1001 1002 1003 1004 1005 1006
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
		      shadow_acc_track_saved_bits_shift);
	new_spte |= saved_bits;

	return new_spte;
}

1007 1008 1009 1010 1011 1012 1013 1014
/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

1015
	if (spte_ad_enabled(spte)) {
1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

1033 1034
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
1035 1036 1037 1038 1039
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();
1040

1041 1042 1043 1044
	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
1045
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
1046 1047 1048 1049
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
1050 1051
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
1052
	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
1053 1054
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
1055
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
1056
	local_irq_enable();
1057 1058
}

1059
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
1060
				  struct kmem_cache *base_cache, int min)
1061 1062 1063 1064
{
	void *obj;

	if (cache->nobjs >= min)
1065
		return 0;
1066
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
1067
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
1068
		if (!obj)
1069
			return cache->nobjs >= min ? 0 : -ENOMEM;
1070 1071
		cache->objects[cache->nobjs++] = obj;
	}
1072
	return 0;
1073 1074
}

1075 1076 1077 1078 1079
static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
	return cache->nobjs;
}

1080 1081
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
1082 1083
{
	while (mc->nobjs)
1084
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
1085 1086
}

A
Avi Kivity 已提交
1087
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
1088
				       int min)
A
Avi Kivity 已提交
1089
{
1090
	void *page;
A
Avi Kivity 已提交
1091 1092 1093 1094

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
1095
		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
A
Avi Kivity 已提交
1096
		if (!page)
1097
			return cache->nobjs >= min ? 0 : -ENOMEM;
1098
		cache->objects[cache->nobjs++] = page;
A
Avi Kivity 已提交
1099 1100 1101 1102 1103 1104 1105
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
1106
		free_page((unsigned long)mc->objects[--mc->nobjs]);
A
Avi Kivity 已提交
1107 1108
}

1109
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
1110
{
1111 1112
	int r;

1113
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
1114
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
1115 1116
	if (r)
		goto out;
1117
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
1118 1119
	if (r)
		goto out;
1120
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
1121
				   mmu_page_header_cache, 4);
1122 1123
out:
	return r;
1124 1125 1126 1127
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
1128 1129
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
1130
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
1131 1132
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
1133 1134
}

1135
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
1136 1137 1138 1139 1140 1141 1142 1143
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

1144
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
1145
{
1146
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
1147 1148
}

1149
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
1150
{
1151
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
1152 1153
}

1154 1155 1156 1157 1158 1159 1160 1161 1162 1163
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
1164
	if (!sp->role.direct) {
1165
		sp->gfns[index] = gfn;
1166 1167 1168 1169 1170 1171 1172 1173
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
1174 1175
}

M
Marcelo Tosatti 已提交
1176
/*
1177 1178
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
M
Marcelo Tosatti 已提交
1179
 */
1180 1181 1182
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
M
Marcelo Tosatti 已提交
1183 1184 1185
{
	unsigned long idx;

1186
	idx = gfn_to_index(gfn, slot->base_gfn, level);
1187
	return &slot->arch.lpage_info[level - 2][idx];
M
Marcelo Tosatti 已提交
1188 1189
}

1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

1213
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
1214
{
1215
	struct kvm_memslots *slots;
1216
	struct kvm_memory_slot *slot;
1217
	gfn_t gfn;
M
Marcelo Tosatti 已提交
1218

1219
	kvm->arch.indirect_shadow_pages++;
1220
	gfn = sp->gfn;
1221 1222
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
1223 1224 1225 1226 1227 1228

	/* the non-leaf shadow pages are keeping readonly. */
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

1229
	kvm_mmu_gfn_disallow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
1230 1231
}

P
Paolo Bonzini 已提交
1232 1233 1234 1235 1236 1237
static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
1238 1239
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
P
Paolo Bonzini 已提交
1240 1241 1242
	sp->lpage_disallowed = true;
}

1243
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
1244
{
1245
	struct kvm_memslots *slots;
1246
	struct kvm_memory_slot *slot;
1247
	gfn_t gfn;
M
Marcelo Tosatti 已提交
1248

1249
	kvm->arch.indirect_shadow_pages--;
1250
	gfn = sp->gfn;
1251 1252
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
1253 1254 1255 1256
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

1257
	kvm_mmu_gfn_allow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
1258 1259
}

P
Paolo Bonzini 已提交
1260 1261 1262 1263
static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
1264
	list_del(&sp->lpage_disallowed_link);
P
Paolo Bonzini 已提交
1265 1266
}

1267 1268
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
					  struct kvm_memory_slot *slot)
M
Marcelo Tosatti 已提交
1269
{
1270
	struct kvm_lpage_info *linfo;
M
Marcelo Tosatti 已提交
1271 1272

	if (slot) {
1273
		linfo = lpage_info_slot(gfn, slot, level);
1274
		return !!linfo->disallow_lpage;
M
Marcelo Tosatti 已提交
1275 1276
	}

1277
	return true;
M
Marcelo Tosatti 已提交
1278 1279
}

1280 1281
static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
					int level)
1282 1283 1284 1285
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1286
	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
1287 1288
}

1289
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
M
Marcelo Tosatti 已提交
1290
{
J
Joerg Roedel 已提交
1291
	unsigned long page_size;
1292
	int i, ret = 0;
M
Marcelo Tosatti 已提交
1293

J
Joerg Roedel 已提交
1294
	page_size = kvm_host_page_size(kvm, gfn);
M
Marcelo Tosatti 已提交
1295

1296
	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
1297 1298 1299 1300 1301 1302
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

1303
	return ret;
M
Marcelo Tosatti 已提交
1304 1305
}

1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
					  bool no_dirty_log)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return false;
	if (no_dirty_log && slot->dirty_bitmap)
		return false;

	return true;
}

1317 1318 1319
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
M
Marcelo Tosatti 已提交
1320 1321
{
	struct kvm_memory_slot *slot;
1322

1323
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1324
	if (!memslot_valid_for_gpte(slot, no_dirty_log))
1325 1326 1327 1328 1329
		slot = NULL;

	return slot;
}

1330
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
1331
			 int *max_levelp)
1332
{
1333
	int max_level = *max_levelp;
1334 1335
	struct kvm_memory_slot *slot;

1336
	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
1337
		return PT_PAGE_TABLE_LEVEL;
M
Marcelo Tosatti 已提交
1338

1339
	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
1340 1341
	if (!memslot_valid_for_gpte(slot, true)) {
		*max_levelp = PT_PAGE_TABLE_LEVEL;
1342
		return PT_PAGE_TABLE_LEVEL;
1343
	}
1344

1345
	max_level = min(max_level, kvm_x86_ops->get_lpage_level());
1346 1347
	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
		if (!__mmu_gfn_lpage_is_disallowed(large_gfn, max_level, slot))
1348
			break;
1349
	}
1350

1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365
	*max_levelp = max_level;

	if (max_level == PT_PAGE_TABLE_LEVEL)
		return PT_PAGE_TABLE_LEVEL;

	/*
	 * Note, host_mapping_level() does *not* handle transparent huge pages.
	 * As suggested by "mapping", it reflects the page size established by
	 * the associated vma, if there is one, i.e. host_mapping_level() will
	 * return a huge page level if and only if a vma exists and the backing
	 * implementation for the vma uses huge pages, e.g. hugetlbfs and dax.
	 * So, do not propagate host_mapping_level() to max_level as KVM can
	 * still promote the guest mapping to a huge page in the THP case.
	 */
	return host_mapping_level(vcpu->kvm, large_gfn);
M
Marcelo Tosatti 已提交
1366 1367
}

1368
/*
1369
 * About rmap_head encoding:
1370
 *
1371 1372
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
1373
 * pte_list_desc containing more mappings.
1374 1375 1376 1377
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
1378
 */
1379
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
1380
			struct kvm_rmap_head *rmap_head)
1381
{
1382
	struct pte_list_desc *desc;
1383
	int i, count = 0;
1384

1385
	if (!rmap_head->val) {
1386
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
1387 1388
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
1389 1390
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
1391
		desc->sptes[0] = (u64 *)rmap_head->val;
A
Avi Kivity 已提交
1392
		desc->sptes[1] = spte;
1393
		rmap_head->val = (unsigned long)desc | 1;
1394
		++count;
1395
	} else {
1396
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
1397
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1398
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
1399
			desc = desc->more;
1400
			count += PTE_LIST_EXT;
1401
		}
1402 1403
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
1404 1405
			desc = desc->more;
		}
A
Avi Kivity 已提交
1406
		for (i = 0; desc->sptes[i]; ++i)
1407
			++count;
A
Avi Kivity 已提交
1408
		desc->sptes[i] = spte;
1409
	}
1410
	return count;
1411 1412
}

1413
static void
1414 1415 1416
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
1417 1418 1419
{
	int j;

1420
	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
1421
		;
A
Avi Kivity 已提交
1422 1423
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
1424 1425 1426
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
1427
		rmap_head->val = 0;
1428 1429 1430 1431
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
1432
			rmap_head->val = (unsigned long)desc->more | 1;
1433
	mmu_free_pte_list_desc(desc);
1434 1435
}

1436
static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
1437
{
1438 1439
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
1440 1441
	int i;

1442
	if (!rmap_head->val) {
1443
		pr_err("%s: %p 0->BUG\n", __func__, spte);
1444
		BUG();
1445
	} else if (!(rmap_head->val & 1)) {
1446
		rmap_printk("%s:  %p 1->0\n", __func__, spte);
1447
		if ((u64 *)rmap_head->val != spte) {
1448
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
1449 1450
			BUG();
		}
1451
		rmap_head->val = 0;
1452
	} else {
1453
		rmap_printk("%s:  %p many->many\n", __func__, spte);
1454
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1455 1456
		prev_desc = NULL;
		while (desc) {
1457
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
A
Avi Kivity 已提交
1458
				if (desc->sptes[i] == spte) {
1459 1460
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
1461 1462
					return;
				}
1463
			}
1464 1465 1466
			prev_desc = desc;
			desc = desc->more;
		}
1467
		pr_err("%s: %p many->many\n", __func__, spte);
1468 1469 1470 1471
		BUG();
	}
}

1472 1473 1474 1475 1476 1477
static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
{
	mmu_spte_clear_track_bits(sptep);
	__pte_list_remove(sptep, rmap_head);
}

1478 1479
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
1480
{
1481
	unsigned long idx;
1482

1483
	idx = gfn_to_index(gfn, slot->base_gfn, level);
1484
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
1485 1486
}

1487 1488
static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					 struct kvm_mmu_page *sp)
1489
{
1490
	struct kvm_memslots *slots;
1491 1492
	struct kvm_memory_slot *slot;

1493 1494
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
1495
	return __gfn_to_rmap(gfn, sp->role.level, slot);
1496 1497
}

1498 1499 1500 1501 1502 1503 1504 1505
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache;

	cache = &vcpu->arch.mmu_pte_list_desc_cache;
	return mmu_memory_cache_free_objects(cache);
}

1506 1507 1508
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
1509
	struct kvm_rmap_head *rmap_head;
1510 1511 1512

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1513 1514
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
1515 1516 1517 1518 1519 1520
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
1521
	struct kvm_rmap_head *rmap_head;
1522 1523 1524

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1525
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
1526
	__pte_list_remove(spte, rmap_head);
1527 1528
}

1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541
/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
M
Miaohe Lin 已提交
1542
 * information in the iterator may not be valid.
1543 1544 1545
 *
 * Returns sptep if found, NULL otherwise.
 */
1546 1547
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
1548
{
1549 1550
	u64 *sptep;

1551
	if (!rmap_head->val)
1552 1553
		return NULL;

1554
	if (!(rmap_head->val & 1)) {
1555
		iter->desc = NULL;
1556 1557
		sptep = (u64 *)rmap_head->val;
		goto out;
1558 1559
	}

1560
	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1561
	iter->pos = 0;
1562 1563 1564 1565
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1566 1567 1568 1569 1570 1571 1572 1573 1574
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
1575 1576
	u64 *sptep;

1577 1578 1579 1580 1581
	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
1582
				goto out;
1583 1584 1585 1586 1587 1588 1589
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
1590 1591
			sptep = iter->desc->sptes[iter->pos];
			goto out;
1592 1593 1594 1595
		}
	}

	return NULL;
1596 1597 1598
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1599 1600
}

1601 1602
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1603
	     _spte_; _spte_ = rmap_get_next(_iter_))
1604

1605
static void drop_spte(struct kvm *kvm, u64 *sptep)
1606
{
1607
	if (mmu_spte_clear_track_bits(sptep))
1608
		rmap_remove(kvm, sptep);
A
Avi Kivity 已提交
1609 1610
}

1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626

static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(page_header(__pa(sptep))->role.level ==
			PT_PAGE_TABLE_LEVEL);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
1627 1628 1629 1630 1631 1632
	if (__drop_large_spte(vcpu->kvm, sptep)) {
		struct kvm_mmu_page *sp = page_header(__pa(sptep));

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
1633 1634 1635
}

/*
1636
 * Write-protect on the specified @sptep, @pt_protect indicates whether
1637
 * spte write-protection is caused by protecting shadow page table.
1638
 *
T
Tiejun Chen 已提交
1639
 * Note: write protection is difference between dirty logging and spte
1640 1641 1642 1643 1644
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
1645
 *
1646
 * Return true if tlb need be flushed.
1647
 */
1648
static bool spte_write_protect(u64 *sptep, bool pt_protect)
1649 1650 1651
{
	u64 spte = *sptep;

1652
	if (!is_writable_pte(spte) &&
1653
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1654 1655 1656 1657
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

1658 1659
	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
1660
	spte = spte & ~PT_WRITABLE_MASK;
1661

1662
	return mmu_spte_update(sptep, spte);
1663 1664
}

1665 1666
static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
1667
				 bool pt_protect)
1668
{
1669 1670
	u64 *sptep;
	struct rmap_iterator iter;
1671
	bool flush = false;
1672

1673
	for_each_rmap_spte(rmap_head, &iter, sptep)
1674
		flush |= spte_write_protect(sptep, pt_protect);
1675

1676
	return flush;
1677 1678
}

1679
static bool spte_clear_dirty(u64 *sptep)
1680 1681 1682 1683 1684
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

1685
	MMU_WARN_ON(!spte_ad_enabled(spte));
1686 1687 1688 1689
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

1690
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1691 1692 1693
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
1694
	if (was_writable && !spte_ad_enabled(*sptep))
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
1706
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1707 1708 1709 1710 1711
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

1712
	for_each_rmap_spte(rmap_head, &iter, sptep)
1713 1714
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
1715
		else
1716
			flush |= spte_clear_dirty(sptep);
1717 1718 1719 1720

	return flush;
}

1721
static bool spte_set_dirty(u64 *sptep)
1722 1723 1724 1725 1726
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

1727 1728 1729 1730 1731
	/*
	 * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
	 * do not bother adding back write access to pages marked
	 * SPTE_AD_WRPROT_ONLY_MASK.
	 */
1732 1733 1734 1735 1736
	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

1737
static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1738 1739 1740 1741 1742
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

1743
	for_each_rmap_spte(rmap_head, &iter, sptep)
1744 1745
		if (spte_ad_enabled(*sptep))
			flush |= spte_set_dirty(sptep);
1746 1747 1748 1749

	return flush;
}

1750
/**
1751
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1752 1753 1754 1755 1756 1757 1758 1759
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
1760
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1761 1762
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
1763
{
1764
	struct kvm_rmap_head *rmap_head;
1765

1766
	while (mask) {
1767 1768 1769
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_write_protect(kvm, rmap_head, false);
M
Marcelo Tosatti 已提交
1770

1771 1772 1773
		/* clear the first set bit */
		mask &= mask - 1;
	}
1774 1775
}

1776
/**
1777 1778
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
1790
	struct kvm_rmap_head *rmap_head;
1791 1792

	while (mask) {
1793 1794 1795
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_clear_dirty(kvm, rmap_head);
1796 1797 1798 1799 1800 1801 1802

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
1817 1818 1819 1820 1821
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1822 1823
}

1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838
/**
 * kvm_arch_write_log_dirty - emulate dirty page logging
 * @vcpu: Guest mode vcpu
 *
 * Emulate arch specific page modification logging for the
 * nested hypervisor
 */
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->write_log_dirty)
		return kvm_x86_ops->write_log_dirty(vcpu);

	return 0;
}

1839 1840
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
1841
{
1842
	struct kvm_rmap_head *rmap_head;
1843
	int i;
1844
	bool write_protected = false;
1845

1846
	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
1847
		rmap_head = __gfn_to_rmap(gfn, i, slot);
1848
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1849 1850 1851
	}

	return write_protected;
1852 1853
}

1854 1855 1856 1857 1858 1859 1860 1861
static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

1862
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1863
{
1864 1865
	u64 *sptep;
	struct rmap_iterator iter;
1866
	bool flush = false;
1867

1868
	while ((sptep = rmap_get_first(rmap_head, &iter))) {
1869
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
1870

1871
		pte_list_remove(rmap_head, sptep);
1872
		flush = true;
1873
	}
1874

1875 1876 1877
	return flush;
}

1878
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1879 1880 1881
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
1882
	return kvm_zap_rmapp(kvm, rmap_head);
1883 1884
}

1885
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1886 1887
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
1888
{
1889 1890
	u64 *sptep;
	struct rmap_iterator iter;
1891
	int need_flush = 0;
1892
	u64 new_spte;
1893
	pte_t *ptep = (pte_t *)data;
D
Dan Williams 已提交
1894
	kvm_pfn_t new_pfn;
1895 1896 1897

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
1898

1899
restart:
1900
	for_each_rmap_spte(rmap_head, &iter, sptep) {
1901
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1902
			    sptep, *sptep, gfn, level);
1903

1904
		need_flush = 1;
1905

1906
		if (pte_write(*ptep)) {
1907
			pte_list_remove(rmap_head, sptep);
1908
			goto restart;
1909
		} else {
1910
			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1911 1912 1913 1914
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
1915 1916

			new_spte = mark_spte_for_access_track(new_spte);
1917 1918 1919

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
1920 1921
		}
	}
1922

1923 1924 1925 1926 1927
	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

1928
	return need_flush;
1929 1930
}

1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
1941
	struct kvm_rmap_head *rmap;
1942 1943 1944
	int level;

	/* private field. */
1945
	struct kvm_rmap_head *end_rmap;
1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

1999 2000 2001 2002 2003
static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
2004
					       struct kvm_rmap_head *rmap_head,
2005
					       struct kvm_memory_slot *slot,
2006 2007
					       gfn_t gfn,
					       int level,
2008
					       unsigned long data))
2009
{
2010
	struct kvm_memslots *slots;
2011
	struct kvm_memory_slot *memslot;
2012 2013
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
2014
	int i;
2015

2016 2017 2018 2019 2020
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;
2021

2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040
			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
						 PT_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
2041 2042
	}

2043
	return ret;
2044 2045
}

2046 2047
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
2048 2049
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
2050
					 struct kvm_memory_slot *slot,
2051
					 gfn_t gfn, int level,
2052 2053 2054
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
2055 2056
}

2057 2058 2059 2060 2061
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

2062
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2063
{
2064
	return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
2065 2066
}

2067
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
2068 2069
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
2070
{
2071
	u64 *sptep;
2072
	struct rmap_iterator uninitialized_var(iter);
2073 2074
	int young = 0;

2075 2076
	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);
2077

2078
	trace_kvm_age_page(gfn, level, slot, young);
2079 2080 2081
	return young;
}

2082
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
2083 2084
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
A
Andrea Arcangeli 已提交
2085
{
2086 2087
	u64 *sptep;
	struct rmap_iterator iter;
A
Andrea Arcangeli 已提交
2088

2089 2090 2091 2092
	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
A
Andrea Arcangeli 已提交
2093 2094
}

2095 2096
#define RMAP_RECYCLE_THRESHOLD 1000

2097
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
2098
{
2099
	struct kvm_rmap_head *rmap_head;
2100 2101 2102
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));
2103

2104
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
2105

2106
	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
2107 2108
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
2109 2110
}

A
Andres Lagar-Cavilla 已提交
2111
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2112
{
A
Andres Lagar-Cavilla 已提交
2113
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
2114 2115
}

A
Andrea Arcangeli 已提交
2116 2117 2118 2119 2120
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}

2121
#ifdef MMU_DEBUG
2122
static int is_empty_shadow_page(u64 *spt)
A
Avi Kivity 已提交
2123
{
2124 2125 2126
	u64 *pos;
	u64 *end;

2127
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
2128
		if (is_shadow_present_pte(*pos)) {
2129
			printk(KERN_ERR "%s: %p %llx\n", __func__,
2130
			       pos, *pos);
A
Avi Kivity 已提交
2131
			return 0;
2132
		}
A
Avi Kivity 已提交
2133 2134
	return 1;
}
2135
#endif
A
Avi Kivity 已提交
2136

2137 2138 2139 2140 2141 2142
/*
 * This value is the sum of all of the kvm instances's
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
2143
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
2144 2145 2146 2147 2148
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

2149
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
2150
{
2151
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
2152
	hlist_del(&sp->hash_link);
2153 2154
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
2155 2156
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
2157
	kmem_cache_free(mmu_page_header_cache, sp);
2158 2159
}

2160 2161
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
2162
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
2163 2164
}

2165
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
2166
				    struct kvm_mmu_page *sp, u64 *parent_pte)
2167 2168 2169 2170
{
	if (!parent_pte)
		return;

2171
	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
2172 2173
}

2174
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
2175 2176
				       u64 *parent_pte)
{
2177
	__pte_list_remove(parent_pte, &sp->parent_ptes);
2178 2179
}

2180 2181 2182 2183
static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
2184
	mmu_spte_clear_no_track(parent_pte);
2185 2186
}

2187
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
M
Marcelo Tosatti 已提交
2188
{
2189
	struct kvm_mmu_page *sp;
2190

2191 2192
	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
2193
	if (!direct)
2194
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
2195
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2196 2197 2198 2199 2200 2201

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
2202
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2203 2204 2205
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
M
Marcelo Tosatti 已提交
2206 2207
}

2208
static void mark_unsync(u64 *spte);
2209
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
2210
{
2211 2212 2213 2214 2215 2216
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
2217 2218
}

2219
static void mark_unsync(u64 *spte)
2220
{
2221
	struct kvm_mmu_page *sp;
2222
	unsigned int index;
2223

2224
	sp = page_header(__pa(spte));
2225 2226
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
2227
		return;
2228
	if (sp->unsync_children++)
2229
		return;
2230
	kvm_mmu_mark_parents_unsync(sp);
2231 2232
}

2233
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
2234
			       struct kvm_mmu_page *sp)
2235
{
2236
	return 0;
2237 2238
}

2239
static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
M
Marcelo Tosatti 已提交
2240 2241 2242
{
}

2243 2244
static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,
2245
				 const void *pte)
2246 2247 2248 2249
{
	WARN_ON(1);
}

2250 2251 2252 2253 2254 2255 2256 2257 2258 2259
#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

2260 2261
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
2262
{
2263
	int i;
2264

2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275
	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

2276 2277 2278 2279 2280 2281 2282
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

2283 2284 2285 2286
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;
2287

2288
	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
2289
		struct kvm_mmu_page *child;
2290 2291
		u64 ent = sp->spt[i];

2292 2293 2294 2295
		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}
2296 2297 2298 2299 2300 2301 2302 2303

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
2304 2305 2306 2307
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
2308
				nr_unsync_leaf += ret;
2309
			} else
2310 2311 2312 2313 2314 2315
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
2316
			clear_unsync_child_bit(sp, i);
2317 2318
	}

2319 2320 2321
	return nr_unsync_leaf;
}

2322 2323
#define INVALID_INDEX (-1)

2324 2325 2326
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
P
Paolo Bonzini 已提交
2327
	pvec->nr = 0;
2328 2329 2330
	if (!sp->unsync_children)
		return 0;

2331
	mmu_pages_add(pvec, sp, INVALID_INDEX);
2332
	return __mmu_unsync_walk(sp, pvec);
2333 2334 2335 2336 2337
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
2338
	trace_kvm_mmu_sync_page(sp);
2339 2340 2341 2342
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

2343 2344
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
2345 2346
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);
2347

2348

2349
#define for_each_valid_sp(_kvm, _sp, _gfn)				\
2350 2351
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
2352
		if (is_obsolete_sp((_kvm), (_sp))) {			\
2353
		} else
2354 2355

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
2356 2357
	for_each_valid_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
2358

2359 2360 2361 2362 2363
static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}

2364
/* @sp->gfn should be write-protected at the call site */
2365 2366
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
2367
{
2368 2369
	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
2370
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2371
		return false;
2372 2373
	}

2374
	return true;
2375 2376
}

2377 2378 2379 2380
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
2381
	if (!remote_flush && list_empty(invalid_list))
2382 2383 2384 2385 2386 2387 2388 2389 2390
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

2391 2392 2393
static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
2394
{
2395
	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
2396
		return;
2397

2398
	if (local_flush)
2399
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2400 2401
}

2402 2403 2404 2405 2406 2407 2408
#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

2409 2410
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
2411 2412
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2413 2414
}

2415
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2416
			 struct list_head *invalid_list)
2417
{
2418 2419
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
2420 2421
}

2422
/* @gfn should be write-protected at the call site */
2423 2424
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
2425 2426
{
	struct kvm_mmu_page *s;
2427
	bool ret = false;
2428

2429
	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2430
		if (!s->unsync)
2431 2432 2433
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
2434
		ret |= kvm_sync_page(vcpu, s, invalid_list);
2435 2436
	}

2437
	return ret;
2438 2439
}

2440
struct mmu_page_path {
2441 2442
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2443 2444
};

2445
#define for_each_sp(pvec, sp, parents, i)			\
P
Paolo Bonzini 已提交
2446
		for (i = mmu_pages_first(&pvec, &parents);	\
2447 2448 2449
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

2450 2451 2452
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
2453 2454 2455 2456 2457
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
P
Paolo Bonzini 已提交
2458 2459
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;
2460

P
Paolo Bonzini 已提交
2461 2462 2463
		parents->idx[level-1] = idx;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;
2464

P
Paolo Bonzini 已提交
2465
		parents->parent[level-2] = sp;
2466 2467 2468 2469 2470
	}

	return n;
}

P
Paolo Bonzini 已提交
2471 2472 2473 2474 2475 2476 2477 2478 2479
static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

2480 2481
	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

P
Paolo Bonzini 已提交
2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494
	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PT_PAGE_TABLE_LEVEL);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

2495
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2496
{
2497 2498 2499 2500 2501 2502 2503 2504 2505
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

2506
		WARN_ON(idx == INVALID_INDEX);
2507
		clear_unsync_child_bit(sp, idx);
2508
		level++;
P
Paolo Bonzini 已提交
2509
	} while (!sp->unsync_children);
2510
}
2511

2512 2513 2514 2515 2516 2517 2518
static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2519
	LIST_HEAD(invalid_list);
2520
	bool flush = false;
2521 2522

	while (mmu_unsync_walk(parent, &pages)) {
2523
		bool protected = false;
2524 2525

		for_each_sp(pages, sp, parents, i)
2526
			protected |= rmap_write_protect(vcpu, sp->gfn);
2527

2528
		if (protected) {
2529
			kvm_flush_remote_tlbs(vcpu->kvm);
2530 2531
			flush = false;
		}
2532

2533
		for_each_sp(pages, sp, parents, i) {
2534
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2535 2536
			mmu_pages_clear_parents(&parents);
		}
2537 2538 2539 2540 2541
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
2542
	}
2543 2544

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2545 2546
}

2547 2548
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
2549
	atomic_set(&sp->write_flooding_count,  0);
2550 2551 2552 2553 2554 2555 2556 2557 2558
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(spte));

	__clear_sp_write_flooding_count(sp);
}

2559 2560 2561 2562
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
2563
					     int direct,
2564
					     unsigned access)
2565 2566 2567
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
2568 2569
	struct kvm_mmu_page *sp;
	bool need_sync = false;
2570
	bool flush = false;
2571
	int collisions = 0;
2572
	LIST_HEAD(invalid_list);
2573

2574
	role = vcpu->arch.mmu->mmu_role.base;
2575
	role.level = level;
2576
	role.direct = direct;
2577
	if (role.direct)
2578
		role.gpte_is_8_bytes = true;
2579
	role.access = access;
2580 2581
	if (!vcpu->arch.mmu->direct_map
	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2582 2583 2584 2585
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
2586 2587 2588 2589 2590 2591
	for_each_valid_sp(vcpu->kvm, sp, gfn) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

2592 2593
		if (!need_sync && sp->unsync)
			need_sync = true;
2594

2595 2596
		if (sp->role.word != role.word)
			continue;
2597

2598 2599 2600 2601 2602 2603 2604 2605 2606 2607
		if (sp->unsync) {
			/* The page is good, but __kvm_sync_page might still end
			 * up zapping it.  If so, break in order to rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
2608

2609
		if (sp->unsync_children)
2610
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2611

2612
		__clear_sp_write_flooding_count(sp);
2613
		trace_kvm_mmu_get_page(sp, false);
2614
		goto out;
2615
	}
2616

A
Avi Kivity 已提交
2617
	++vcpu->kvm->stat.mmu_cache_miss;
2618 2619 2620

	sp = kvm_mmu_alloc_page(vcpu, direct);

2621 2622
	sp->gfn = gfn;
	sp->role = role;
2623 2624
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
2625
	if (!direct) {
2626 2627 2628 2629 2630 2631 2632 2633
		/*
		 * we should do write protection before syncing pages
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PT_PAGE_TABLE_LEVEL &&
		      rmap_write_protect(vcpu, gfn))
2634
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2635 2636

		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2637
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2638
	}
2639
	clear_page(sp->spt);
A
Avi Kivity 已提交
2640
	trace_kvm_mmu_get_page(sp, true);
2641 2642

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2643 2644 2645
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2646
	return sp;
2647 2648
}

2649 2650 2651
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
2652 2653
{
	iterator->addr = addr;
2654
	iterator->shadow_addr = root;
2655
	iterator->level = vcpu->arch.mmu->shadow_root_level;
2656

2657
	if (iterator->level == PT64_ROOT_4LEVEL &&
2658 2659
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
2660 2661
		--iterator->level;

2662
	if (iterator->level == PT32E_ROOT_LEVEL) {
2663 2664 2665 2666
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
2667
		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2668

2669
		iterator->shadow_addr
2670
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2671 2672 2673 2674 2675 2676 2677
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

2678 2679 2680
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
2681
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2682 2683 2684
				    addr);
}

2685 2686 2687 2688
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;
2689

2690 2691 2692 2693 2694
	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

2695 2696
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
2697
{
2698
	if (is_last_spte(spte, iterator->level)) {
2699 2700 2701 2702
		iterator->level = 0;
		return;
	}

2703
	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2704 2705 2706
	--iterator->level;
}

2707 2708
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
2709
	__shadow_walk_next(iterator, *iterator->sptep);
2710 2711
}

2712 2713
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
2714 2715 2716
{
	u64 spte;

2717
	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2718

2719
	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2720
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
2721 2722

	if (sp_ad_disabled(sp))
2723
		spte |= SPTE_AD_DISABLED_MASK;
2724 2725
	else
		spte |= shadow_accessed_mask;
X
Xiao Guangrong 已提交
2726

2727
	mmu_spte_set(sptep, spte);
2728 2729 2730 2731 2732

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
2733 2734
}

2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed form clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

2752
		drop_parent_pte(child, sptep);
2753
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2754 2755 2756
	}
}

X
Xiao Guangrong 已提交
2757
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2758 2759 2760 2761 2762 2763 2764
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
X
Xiao Guangrong 已提交
2765
		if (is_last_spte(pte, sp->role.level)) {
2766
			drop_spte(kvm, spte);
X
Xiao Guangrong 已提交
2767 2768 2769
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
2770
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2771
			drop_parent_pte(child, spte);
2772
		}
X
Xiao Guangrong 已提交
2773 2774 2775 2776
		return true;
	}

	if (is_mmio_spte(pte))
2777
		mmu_spte_clear_no_track(spte);
2778

X
Xiao Guangrong 已提交
2779
	return false;
2780 2781
}

2782
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2783
					 struct kvm_mmu_page *sp)
2784
{
2785 2786
	unsigned i;

2787 2788
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
2789 2790
}

2791
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2792
{
2793 2794
	u64 *sptep;
	struct rmap_iterator iter;
2795

2796
	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2797
		drop_parent_pte(sp, sptep);
2798 2799
}

2800
static int mmu_zap_unsync_children(struct kvm *kvm,
2801 2802
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
2803
{
2804 2805 2806
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2807

2808
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2809
		return 0;
2810 2811 2812 2813 2814

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
2815
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2816
			mmu_pages_clear_parents(&parents);
2817
			zapped++;
2818 2819 2820 2821
		}
	}

	return zapped;
2822 2823
}

2824 2825 2826 2827
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
2828
{
2829
	bool list_unstable;
A
Avi Kivity 已提交
2830

2831
	trace_kvm_mmu_prepare_zap_page(sp);
2832
	++kvm->stat.mmu_shadow_zapped;
2833
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2834
	kvm_mmu_page_unlink_children(kvm, sp);
2835
	kvm_mmu_unlink_parents(kvm, sp);
2836

2837 2838 2839
	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

2840
	if (!sp->role.invalid && !sp->role.direct)
2841
		unaccount_shadowed(kvm, sp);
2842

2843 2844
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
2845
	if (!sp->root_count) {
2846
		/* Count self */
2847
		(*nr_zapped)++;
2848
		list_move(&sp->link, invalid_list);
2849
		kvm_mod_used_mmu_pages(kvm, -1);
2850
	} else {
A
Avi Kivity 已提交
2851
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
2852

2853 2854 2855 2856 2857 2858
		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
2859
			kvm_reload_remote_mmus(kvm);
2860
	}
2861

P
Paolo Bonzini 已提交
2862 2863 2864
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

2865
	sp->role.invalid = 1;
2866 2867 2868 2869 2870 2871 2872 2873 2874 2875
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
2876 2877
}

2878 2879 2880
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
2881
	struct kvm_mmu_page *sp, *nsp;
2882 2883 2884 2885

	if (list_empty(invalid_list))
		return;

2886
	/*
2887 2888 2889 2890 2891 2892 2893
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
2894 2895
	 */
	kvm_flush_remote_tlbs(kvm);
2896

2897
	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2898
		WARN_ON(!sp->role.invalid || sp->root_count);
2899
		kvm_mmu_free_page(sp);
2900
	}
2901 2902
}

2903 2904 2905 2906 2907 2908 2909 2910
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
					struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return false;

G
Geliang Tang 已提交
2911 2912
	sp = list_last_entry(&kvm->arch.active_mmu_pages,
			     struct kvm_mmu_page, link);
2913
	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2914 2915
}

2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;

		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

2936 2937
/*
 * Changing the number of mmu pages allocated to the vm
2938
 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2939
 */
2940
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2941
{
2942
	LIST_HEAD(invalid_list);
2943

2944 2945
	spin_lock(&kvm->mmu_lock);

2946
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2947 2948 2949 2950
		/* Need to free some mmu pages to achieve the goal. */
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
				break;
2951

2952
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
2953
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2954 2955
	}

2956
	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2957 2958

	spin_unlock(&kvm->mmu_lock);
2959 2960
}

2961
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2962
{
2963
	struct kvm_mmu_page *sp;
2964
	LIST_HEAD(invalid_list);
2965 2966
	int r;

2967
	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2968
	r = 0;
2969
	spin_lock(&kvm->mmu_lock);
2970
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2971
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2972 2973
			 sp->role.word);
		r = 1;
2974
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2975
	}
2976
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2977 2978
	spin_unlock(&kvm->mmu_lock);

2979
	return r;
2980
}
2981
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2982

2983
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2984 2985 2986 2987 2988 2989 2990 2991
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

2992 2993
static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				   bool can_unsync)
2994
{
2995
	struct kvm_mmu_page *sp;
2996

2997 2998
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;
2999

3000
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
3001
		if (!can_unsync)
3002
			return true;
3003

3004 3005
		if (sp->unsync)
			continue;
3006

3007 3008
		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unsync_page(vcpu, sp);
3009
	}
3010

3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049
	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
	 *                          Since it is false, so it just returns.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          so the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

3050
	return false;
3051 3052
}

D
Dan Williams 已提交
3053
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
3054 3055
{
	if (pfn_valid(pfn))
3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping them with UC
			 * memory type, which would hurt the performance.
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
3068

3069 3070 3071
	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
3072 3073
}

3074 3075 3076 3077
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)

A
Avi Kivity 已提交
3078
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
3079
		    unsigned pte_access, int level,
D
Dan Williams 已提交
3080
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
3081
		    bool can_unsync, bool host_writable)
3082
{
3083
	u64 spte = 0;
M
Marcelo Tosatti 已提交
3084
	int ret = 0;
3085
	struct kvm_mmu_page *sp;
S
Sheng Yang 已提交
3086

3087
	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
3088 3089
		return 0;

3090 3091
	sp = page_header(__pa(sptep));
	if (sp_ad_disabled(sp))
3092
		spte |= SPTE_AD_DISABLED_MASK;
3093 3094
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_AD_WRPROT_ONLY_MASK;
3095

3096 3097 3098 3099 3100 3101
	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
3102
	spte |= shadow_present_mask;
3103
	if (!speculative)
3104
		spte |= spte_shadow_accessed_mask(spte);
3105

P
Paolo Bonzini 已提交
3106 3107 3108 3109 3110
	if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

S
Sheng Yang 已提交
3111 3112 3113 3114
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
3115

3116
	if (pte_access & ACC_USER_MASK)
S
Sheng Yang 已提交
3117
		spte |= shadow_user_mask;
3118

3119
	if (level > PT_PAGE_TABLE_LEVEL)
M
Marcelo Tosatti 已提交
3120
		spte |= PT_PAGE_SIZE_MASK;
3121
	if (tdp_enabled)
3122
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
3123
			kvm_is_mmio_pfn(pfn));
3124

3125
	if (host_writable)
3126
		spte |= SPTE_HOST_WRITEABLE;
3127 3128
	else
		pte_access &= ~ACC_WRITE_MASK;
3129

3130 3131 3132
	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

3133
	spte |= (u64)pfn << PAGE_SHIFT;
3134

3135
	if (pte_access & ACC_WRITE_MASK) {
3136

X
Xiao Guangrong 已提交
3137
		/*
3138 3139 3140 3141
		 * Other vcpu creates new sp in the window between
		 * mapping_level() and acquiring mmu-lock. We can
		 * allow guest to retry the access, the mapping can
		 * be fixed if guest refault.
X
Xiao Guangrong 已提交
3142
		 */
3143
		if (level > PT_PAGE_TABLE_LEVEL &&
3144
		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
A
Avi Kivity 已提交
3145
			goto done;
3146

3147
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
3148

3149 3150 3151 3152 3153 3154
		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
3155
		if (!can_unsync && is_writable_pte(*sptep))
3156 3157
			goto set_pte;

3158
		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
3159
			pgprintk("%s: found shadow page for %llx, marking ro\n",
3160
				 __func__, gfn);
3161
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
3162
			pte_access &= ~ACC_WRITE_MASK;
3163
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
3164 3165 3166
		}
	}

3167
	if (pte_access & ACC_WRITE_MASK) {
3168
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3169
		spte |= spte_shadow_dirty_mask(spte);
3170
	}
3171

3172 3173 3174
	if (speculative)
		spte = mark_spte_for_access_track(spte);

3175
set_pte:
3176
	if (mmu_spte_update(sptep, spte))
3177
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
A
Avi Kivity 已提交
3178
done:
M
Marcelo Tosatti 已提交
3179 3180 3181
	return ret;
}

3182 3183 3184
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
		       	bool speculative, bool host_writable)
M
Marcelo Tosatti 已提交
3185 3186
{
	int was_rmapped = 0;
3187
	int rmap_count;
3188
	int set_spte_ret;
3189
	int ret = RET_PF_RETRY;
3190
	bool flush = false;
M
Marcelo Tosatti 已提交
3191

3192 3193
	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);
M
Marcelo Tosatti 已提交
3194

3195
	if (is_shadow_present_pte(*sptep)) {
M
Marcelo Tosatti 已提交
3196 3197 3198 3199
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
3200 3201
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
3202
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
3203
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
3204 3205

			child = page_header(pte & PT64_BASE_ADDR_MASK);
3206
			drop_parent_pte(child, sptep);
3207
			flush = true;
A
Avi Kivity 已提交
3208
		} else if (pfn != spte_to_pfn(*sptep)) {
3209
			pgprintk("hfn old %llx new %llx\n",
A
Avi Kivity 已提交
3210
				 spte_to_pfn(*sptep), pfn);
3211
			drop_spte(vcpu->kvm, sptep);
3212
			flush = true;
3213 3214
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
3215
	}
3216

3217 3218 3219
	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
M
Marcelo Tosatti 已提交
3220
		if (write_fault)
3221
			ret = RET_PF_EMULATE;
3222
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3223
	}
3224

3225
	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
3226 3227
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));
M
Marcelo Tosatti 已提交
3228

3229
	if (unlikely(is_mmio_spte(*sptep)))
3230
		ret = RET_PF_EMULATE;
3231

A
Avi Kivity 已提交
3232
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
3233
	trace_kvm_mmu_set_spte(level, gfn, sptep);
A
Avi Kivity 已提交
3234
	if (!was_rmapped && is_large_pte(*sptep))
M
Marcelo Tosatti 已提交
3235 3236
		++vcpu->kvm->stat.lpages;

3237 3238 3239 3240 3241 3242
	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
3243
	}
3244

3245
	return ret;
3246 3247
}

D
Dan Williams 已提交
3248
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
3249 3250 3251 3252
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

3253
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
3254
	if (!slot)
3255
		return KVM_PFN_ERR_FAULT;
3256

3257
	return gfn_to_pfn_memslot_atomic(slot, gfn);
3258 3259 3260 3261 3262 3263 3264
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
3265
	struct kvm_memory_slot *slot;
3266 3267 3268 3269 3270
	unsigned access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
3271 3272
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
3273 3274
		return -1;

3275
	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
3276 3277 3278
	if (ret <= 0)
		return -1;

3279
	for (i = 0; i < ret; i++, gfn++, start++) {
3280 3281
		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
3282 3283
		put_page(pages[i]);
	}
3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3300
		if (is_shadow_present_pte(*spte) || spte == sptep) {
3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

3315 3316
	sp = page_header(__pa(sptep));

3317
	/*
3318 3319 3320
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
3321
	 */
3322
	if (sp_ad_disabled(sp))
3323 3324 3325 3326 3327 3328 3329 3330
		return;

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
					gfn_t gfn, kvm_pfn_t *pfnp,
					int *levelp)
{
	kvm_pfn_t pfn = *pfnp;
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this would be an
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
	 * here.
	 */
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
	    PageTransCompoundMap(pfn_to_page(pfn))) {
		unsigned long mask;
3348

3349
		/*
3350 3351
		 * mmu_notifier_retry() was successful and mmu_lock is held, so
		 * the pmd can't be split from under us.
3352 3353 3354 3355
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
3356
		*pfnp = pfn & ~mask;
3357 3358 3359
	}
}

P
Paolo Bonzini 已提交
3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382
static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
				       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
{
	int level = *levelp;
	u64 spte = *it.sptep;

	if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
	    is_nx_huge_page_enabled() &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching back for them into pfn the next 9 bits of
		 * the address.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*levelp)--;
	}
}

3383
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
3384 3385 3386
			int map_writable, int level, int max_level,
			kvm_pfn_t pfn, bool prefault,
			bool account_disallowed_nx_lpage)
3387
{
3388
	struct kvm_shadow_walk_iterator it;
3389
	struct kvm_mmu_page *sp;
3390 3391 3392
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;
A
Avi Kivity 已提交
3393

3394
	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
3395
		return RET_PF_RETRY;
3396

3397 3398 3399
	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);

3400
	trace_kvm_mmu_spte_requested(gpa, level, pfn);
3401
	for_each_shadow_entry(vcpu, gpa, it) {
P
Paolo Bonzini 已提交
3402 3403 3404 3405 3406 3407
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		disallowed_hugepage_adjust(it, gfn, &pfn, &level);

3408 3409
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
3410
			break;
A
Avi Kivity 已提交
3411

3412 3413 3414 3415
		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, ACC_ALL);
3416

3417
			link_shadow_page(vcpu, it.sptep, sp);
3418
			if (account_disallowed_nx_lpage)
P
Paolo Bonzini 已提交
3419
				account_huge_nx_page(vcpu->kvm, sp);
3420 3421
		}
	}
3422 3423 3424 3425 3426 3427 3428

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
A
Avi Kivity 已提交
3429 3430
}

H
Huang Ying 已提交
3431
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3432
{
3433
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
3434 3435
}

D
Dan Williams 已提交
3436
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3437
{
X
Xiao Guangrong 已提交
3438 3439 3440 3441 3442 3443
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte otherwise read access on readonly gfn also can
	 * caused mmio page fault and treat it as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
3444
		return RET_PF_EMULATE;
X
Xiao Guangrong 已提交
3445

3446
	if (pfn == KVM_PFN_ERR_HWPOISON) {
3447
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3448
		return RET_PF_RETRY;
3449
	}
3450

3451
	return -EFAULT;
3452 3453
}

3454
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
D
Dan Williams 已提交
3455
				kvm_pfn_t pfn, unsigned access, int *ret_val)
3456 3457
{
	/* The pfn is invalid, report the error! */
3458
	if (unlikely(is_error_pfn(pfn))) {
3459
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3460
		return true;
3461 3462
	}

3463
	if (unlikely(is_noslot_pfn(pfn)))
3464 3465
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);
3466

3467
	return false;
3468 3469
}

3470
static bool page_fault_can_be_fast(u32 error_code)
3471
{
3472 3473 3474 3475 3476 3477 3478
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

3479 3480 3481 3482 3483
	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

3484
	/*
3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3496 3497
	 */

3498 3499 3500
	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3501 3502
}

3503 3504 3505 3506
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3507
static bool
3508
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3509
			u64 *sptep, u64 old_spte, u64 new_spte)
3510 3511 3512 3513 3514
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3527
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3528 3529
		return false;

3530
	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3531 3532 3533 3534 3535 3536 3537
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
3538 3539 3540 3541

	return true;
}

3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553
static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

3554 3555 3556 3557 3558
/*
 * Return value:
 * - true: let the vcpu to access on the same address again.
 * - false: let the real page fault path to fix it.
 */
3559
static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
3560 3561 3562
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
3563
	struct kvm_mmu_page *sp;
3564
	bool fault_handled = false;
3565
	u64 spte = 0ull;
3566
	uint retry_count = 0;
3567

3568
	if (!page_fault_can_be_fast(error_code))
3569 3570 3571 3572
		return false;

	walk_shadow_page_lockless_begin(vcpu);

3573
	do {
3574
		u64 new_spte;
3575

3576
		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3577 3578 3579 3580
			if (!is_shadow_present_pte(spte) ||
			    iterator.level < level)
				break;

3581 3582 3583
		sp = page_header(__pa(iterator.sptep));
		if (!is_last_spte(spte, sp->role.level))
			break;
3584

3585
		/*
3586 3587 3588 3589 3590
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3591 3592 3593 3594
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3595 3596 3597 3598
		if (is_access_allowed(error_code, spte)) {
			fault_handled = true;
			break;
		}
3599

3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613
		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
		    spte_can_locklessly_be_made_writable(spte))
		{
			new_spte |= PT_WRITABLE_MASK;
3614 3615

			/*
3616 3617 3618 3619 3620 3621 3622 3623 3624
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
3625
			 */
3626
			if (sp->role.level > PT_PAGE_TABLE_LEVEL)
3627
				break;
3628
		}
3629

3630
		/* Verify that the fault can be handled in the fast path */
3631 3632
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
3633 3634 3635 3636 3637
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
3638
		 * Documentation/virt/kvm/locking.txt to get more detail.
3639 3640
		 */
		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3641
							iterator.sptep, spte,
3642
							new_spte);
3643 3644 3645 3646 3647 3648 3649 3650 3651 3652
		if (fault_handled)
			break;

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);
3653

3654
	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3655
			      spte, fault_handled);
3656 3657
	walk_shadow_page_lockless_end(vcpu);

3658
	return fault_handled;
3659 3660
}

3661 3662
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
3663
{
3664
	struct kvm_mmu_page *sp;
3665

3666
	if (!VALID_PAGE(*root_hpa))
A
Avi Kivity 已提交
3667
		return;
3668

3669 3670 3671 3672
	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
	--sp->root_count;
	if (!sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3673

3674 3675 3676
	*root_hpa = INVALID_PAGE;
}

3677
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3678 3679
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
3680 3681 3682
{
	int i;
	LIST_HEAD(invalid_list);
3683
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3684

3685
	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3686

3687
	/* Before acquiring the MMU lock, see if we need to do any real work. */
3688 3689 3690 3691 3692 3693 3694 3695 3696
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}
3697 3698

	spin_lock(&vcpu->kvm->mmu_lock);
3699

3700 3701 3702 3703
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);
3704

3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717
	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
					   &invalid_list);
		} else {
			for (i = 0; i < 4; ++i)
				if (mmu->pae_root[i] != 0)
					mmu_free_root_page(vcpu->kvm,
							   &mmu->pae_root[i],
							   &invalid_list);
			mmu->root_hpa = INVALID_PAGE;
		}
3718
		mmu->root_cr3 = 0;
3719
	}
3720

3721
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3722
	spin_unlock(&vcpu->kvm->mmu_lock);
3723
}
3724
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3725

3726 3727 3728 3729 3730
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3731
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3732 3733 3734 3735 3736 3737
		ret = 1;
	}

	return ret;
}

3738 3739 3740
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
3741
	unsigned i;
3742

3743
	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3744
		spin_lock(&vcpu->kvm->mmu_lock);
3745 3746
		if(make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
3747
			return -ENOSPC;
3748
		}
3749
		sp = kvm_mmu_get_page(vcpu, 0, 0,
3750
				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
3751 3752
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
3753 3754
		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
3755
		for (i = 0; i < 4; ++i) {
3756
			hpa_t root = vcpu->arch.mmu->pae_root[i];
3757

3758
			MMU_WARN_ON(VALID_PAGE(root));
3759
			spin_lock(&vcpu->kvm->mmu_lock);
3760 3761
			if (make_mmu_pages_available(vcpu) < 0) {
				spin_unlock(&vcpu->kvm->mmu_lock);
3762
				return -ENOSPC;
3763
			}
3764
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3765
					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
3766 3767 3768
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
3769
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
3770
		}
3771
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3772 3773
	} else
		BUG();
3774
	vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
3775 3776 3777 3778 3779

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3780
{
3781
	struct kvm_mmu_page *sp;
3782
	u64 pdptr, pm_mask;
3783
	gfn_t root_gfn, root_cr3;
3784
	int i;
3785

3786 3787
	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
	root_gfn = root_cr3 >> PAGE_SHIFT;
3788

3789 3790 3791 3792 3793 3794 3795
	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
3796 3797
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;
3798

3799
		MMU_WARN_ON(VALID_PAGE(root));
3800

3801
		spin_lock(&vcpu->kvm->mmu_lock);
3802 3803
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
3804
			return -ENOSPC;
3805
		}
3806
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
3807
				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
3808 3809
		root = __pa(sp->spt);
		++sp->root_count;
3810
		spin_unlock(&vcpu->kvm->mmu_lock);
3811
		vcpu->arch.mmu->root_hpa = root;
3812
		goto set_root_cr3;
3813
	}
3814

3815 3816
	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
3817 3818
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
3819
	 */
3820
	pm_mask = PT_PRESENT_MASK;
3821
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3822 3823
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

3824
	for (i = 0; i < 4; ++i) {
3825
		hpa_t root = vcpu->arch.mmu->pae_root[i];
3826

3827
		MMU_WARN_ON(VALID_PAGE(root));
3828 3829
		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
B
Bandan Das 已提交
3830
			if (!(pdptr & PT_PRESENT_MASK)) {
3831
				vcpu->arch.mmu->pae_root[i] = 0;
A
Avi Kivity 已提交
3832 3833
				continue;
			}
A
Avi Kivity 已提交
3834
			root_gfn = pdptr >> PAGE_SHIFT;
3835 3836
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
3837
		}
3838
		spin_lock(&vcpu->kvm->mmu_lock);
3839 3840
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
3841
			return -ENOSPC;
3842
		}
3843 3844
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
				      0, ACC_ALL);
3845 3846
		root = __pa(sp->spt);
		++sp->root_count;
3847 3848
		spin_unlock(&vcpu->kvm->mmu_lock);

3849
		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
3850
	}
3851
	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3852 3853 3854 3855 3856

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
3857 3858
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->lm_root == NULL) {
3859 3860 3861 3862 3863 3864 3865
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */

			u64 *lm_root;

3866
			lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3867 3868 3869
			if (lm_root == NULL)
				return 1;

3870
			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
3871

3872
			vcpu->arch.mmu->lm_root = lm_root;
3873 3874
		}

3875
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
3876 3877
	}

3878 3879 3880
set_root_cr3:
	vcpu->arch.mmu->root_cr3 = root_cr3;

3881
	return 0;
3882 3883
}

3884 3885
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
3886
	if (vcpu->arch.mmu->direct_map)
3887 3888 3889 3890 3891
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}

3892
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3893 3894 3895 3896
{
	int i;
	struct kvm_mmu_page *sp;

3897
	if (vcpu->arch.mmu->direct_map)
3898 3899
		return;

3900
	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3901
		return;
3902

3903
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3904

3905 3906
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;
3907
		sp = page_header(root);
3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925

		/*
		 * Even if another CPU was marking the SP as unsync-ed
		 * simultaneously, any guest page table changes are not
		 * guaranteed to be visible anyway until this VCPU issues a TLB
		 * flush strictly after those changes are made. We only need to
		 * ensure that the other CPU sets these flags before any actual
		 * changes to the page tables are made. The comments in
		 * mmu_need_write_protect() describe what could go wrong if this
		 * requirement isn't satisfied.
		 */
		if (!smp_load_acquire(&sp->unsync) &&
		    !smp_load_acquire(&sp->unsync_children))
			return;

		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3926
		mmu_sync_children(vcpu, sp);
3927

3928
		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3929
		spin_unlock(&vcpu->kvm->mmu_lock);
3930 3931
		return;
	}
3932 3933 3934 3935

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3936
	for (i = 0; i < 4; ++i) {
3937
		hpa_t root = vcpu->arch.mmu->pae_root[i];
3938

3939
		if (root && VALID_PAGE(root)) {
3940 3941 3942 3943 3944 3945
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}

3946
	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3947
	spin_unlock(&vcpu->kvm->mmu_lock);
3948
}
N
Nadav Har'El 已提交
3949
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3950

3951
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3952
				  u32 access, struct x86_exception *exception)
A
Avi Kivity 已提交
3953
{
3954 3955
	if (exception)
		exception->error_code = 0;
A
Avi Kivity 已提交
3956 3957 3958
	return vaddr;
}

3959
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3960 3961
					 u32 access,
					 struct x86_exception *exception)
3962
{
3963 3964
	if (exception)
		exception->error_code = 0;
3965
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3966 3967
}

3968 3969 3970
static bool
__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
{
3971
	int bit7 = (pte >> 7) & 1;
3972

3973
	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3974 3975
}

3976
static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3977
{
3978
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3979 3980
}

3981
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3982
{
3983 3984 3985 3986 3987 3988 3989
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

3990 3991 3992 3993 3994 3995
	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

3996 3997 3998
/* return true if reserved bit is detected on spte. */
static bool
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3999 4000
{
	struct kvm_shadow_walk_iterator iterator;
4001
	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
4002
	struct rsvd_bits_validate *rsvd_check;
4003 4004
	int root, leaf;
	bool reserved = false;
4005

4006 4007
	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

4008
	walk_shadow_page_lockless_begin(vcpu);
4009

4010 4011
	for (shadow_walk_init(&iterator, vcpu, addr),
		 leaf = root = iterator.level;
4012 4013 4014 4015 4016
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf - 1] = spte;
4017
		leaf--;
4018

4019 4020
		if (!is_shadow_present_pte(spte))
			break;
4021

4022 4023 4024 4025 4026 4027 4028
		/*
		 * Use a bitwise-OR instead of a logical-OR to aggregate the
		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
		 * adding a Jcc in the loop.
		 */
		reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
			    __is_rsvd_bits_set(rsvd_check, spte, iterator.level);
4029 4030
	}

4031 4032
	walk_shadow_page_lockless_end(vcpu);

4033 4034 4035
	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
4036
		while (root > leaf) {
4037 4038 4039 4040 4041
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[root - 1], root);
			root--;
		}
	}
4042

4043 4044
	*sptep = spte;
	return reserved;
4045 4046
}

P
Paolo Bonzini 已提交
4047
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4048 4049
{
	u64 spte;
4050
	bool reserved;
4051

4052
	if (mmio_info_in_cache(vcpu, addr, direct))
4053
		return RET_PF_EMULATE;
4054

4055
	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
4056
	if (WARN_ON(reserved))
4057
		return -EINVAL;
4058 4059 4060 4061 4062

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned access = get_mmio_spte_access(spte);

4063
		if (!check_mmio_spte(vcpu, spte))
4064
			return RET_PF_INVALID;
4065

4066 4067
		if (direct)
			addr = 0;
X
Xiao Guangrong 已提交
4068 4069

		trace_handle_mmio_page_fault(addr, gfn, access);
4070
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4071
		return RET_PF_EMULATE;
4072 4073 4074 4075 4076 4077
	}

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
4078
	return RET_PF_RETRY;
4079 4080
}

4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * guest is writing the page which is write tracked which can
	 * not be fixed by page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				   gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
			 bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async;

	/*
	 * Don't expose private memslots to L2.
	 */
	if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		*pfn = KVM_PFN_NOSLOT;
		return false;
	}

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
	if (!async)
		return false; /* *pfn has correct page already */

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
			return true;
	}

	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
	return false;
}

4164 4165
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			     bool prefault, int max_level, bool is_tdp)
A
Avi Kivity 已提交
4166
{
4167 4168 4169
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
4170
	bool map_writable;
A
Avi Kivity 已提交
4171

4172 4173 4174 4175
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int level, r;
4176

4177
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
4178
		return RET_PF_EMULATE;
4179

4180 4181 4182
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
4183

4184 4185
	if (lpage_disallowed)
		max_level = PT_PAGE_TABLE_LEVEL;
4186

4187 4188
	level = mapping_level(vcpu, gfn, &max_level);
	if (level > PT_PAGE_TABLE_LEVEL)
4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	if (fast_page_fault(vcpu, gpa, level, error_code))
		return RET_PF_RETRY;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return RET_PF_RETRY;

4200
	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
4201
		return r;
A
Avi Kivity 已提交
4202

4203 4204 4205 4206 4207 4208
	r = RET_PF_RETRY;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
4209 4210
	r = __direct_map(vcpu, gpa, write, map_writable, level, max_level, pfn,
			 prefault, is_tdp && lpage_disallowed);
4211

4212 4213 4214 4215
out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
A
Avi Kivity 已提交
4216 4217
}

4218 4219 4220 4221 4222 4223 4224 4225 4226 4227
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				u32 error_code, bool prefault)
{
	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);

	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
				 PT_DIRECTORY_LEVEL, false);
}

4228
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4229
				u64 fault_address, char *insn, int insn_len)
4230 4231 4232
{
	int r = 1;

4233 4234 4235 4236 4237 4238
#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

P
Paolo Bonzini 已提交
4239
	vcpu->arch.l1tf_flush_l1d = true;
4240 4241 4242 4243
	switch (vcpu->arch.apf.host_apf_reason) {
	default:
		trace_kvm_page_fault(fault_address, error_code);

4244
		if (kvm_event_needs_reinjection(vcpu))
4245 4246 4247 4248 4249 4250 4251
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
4252
		kvm_async_pf_task_wait(fault_address, 0);
4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

4266
static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
4267
			  bool prefault)
4268
{
4269
	int max_level;
4270

4271 4272 4273 4274
	for (max_level = PT_MAX_HUGEPAGE_LEVEL;
	     max_level > PT_PAGE_TABLE_LEVEL;
	     max_level--) {
		int page_num = KVM_PAGES_PER_HPAGE(max_level);
4275
		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
4276 4277 4278 4279 4280

		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;
	}

4281 4282
	return direct_page_fault(vcpu, gpa, error_code, prefault,
				 max_level, true);
4283 4284
}

4285 4286
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
A
Avi Kivity 已提交
4287 4288 4289
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
4290
	context->sync_page = nonpaging_sync_page;
M
Marcelo Tosatti 已提交
4291
	context->invlpg = nonpaging_invlpg;
4292
	context->update_pte = nonpaging_update_pte;
4293
	context->root_level = 0;
A
Avi Kivity 已提交
4294
	context->shadow_root_level = PT32E_ROOT_LEVEL;
4295
	context->direct_map = true;
4296
	context->nx = false;
A
Avi Kivity 已提交
4297 4298
}

4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311
/*
 * Find out if a previously cached root matching the new CR3/role is available.
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
4312
	struct kvm_mmu *mmu = vcpu->arch.mmu;
4313

4314
	root.cr3 = mmu->root_cr3;
4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326
	root.hpa = mmu->root_hpa;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

		if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
		    page_header(root.hpa) != NULL &&
		    new_role.word == page_header(root.hpa)->role.word)
			break;
	}

	mmu->root_hpa = root.hpa;
4327
	mmu->root_cr3 = root.cr3;
4328 4329 4330 4331

	return i < KVM_MMU_NUM_PREV_ROOTS;
}

4332
static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4333 4334
			    union kvm_mmu_page_role new_role,
			    bool skip_tlb_flush)
A
Avi Kivity 已提交
4335
{
4336
	struct kvm_mmu *mmu = vcpu->arch.mmu;
4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
	    mmu->root_level >= PT64_ROOT_4LEVEL) {
		if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
			return false;

4348
		if (cached_root_available(vcpu, new_cr3, new_role)) {
4349 4350 4351 4352 4353 4354 4355
			/*
			 * It is possible that the cached previous root page is
			 * obsolete because of a change in the MMU generation
			 * number. However, changing the generation number is
			 * accompanied by KVM_REQ_MMU_RELOAD, which will free
			 * the root set here and allocate a new one.
			 */
4356
			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
4357 4358
			if (!skip_tlb_flush) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4359
				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4360 4361 4362 4363 4364 4365 4366 4367 4368 4369
			}

			/*
			 * The last MMIO access's GVA and GPA are cached in the
			 * VCPU. When switching to a new CR3, that GVA->GPA
			 * mapping may no longer be valid. So clear any cached
			 * MMIO info even when we don't need to sync the shadow
			 * page tables.
			 */
			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4370

4371 4372 4373 4374 4375 4376 4377 4378
			__clear_sp_write_flooding_count(
				page_header(mmu->root_hpa));

			return true;
		}
	}

	return false;
A
Avi Kivity 已提交
4379 4380
}

4381
static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4382 4383
			      union kvm_mmu_page_role new_role,
			      bool skip_tlb_flush)
A
Avi Kivity 已提交
4384
{
4385
	if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
4386 4387
		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
				   KVM_MMU_ROOT_CURRENT);
A
Avi Kivity 已提交
4388 4389
}

4390
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
4391
{
4392 4393
	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
			  skip_tlb_flush);
4394
}
4395
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
4396

4397 4398
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
4399
	return kvm_read_cr3(vcpu);
4400 4401
}

4402 4403
static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
A
Avi Kivity 已提交
4404
{
4405
	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
A
Avi Kivity 已提交
4406 4407
}

4408
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4409
			   unsigned access, int *nr_present)
4410 4411 4412 4413 4414 4415 4416 4417
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
4418
		mark_mmio_spte(vcpu, sptep, gfn, access);
4419 4420 4421 4422 4423 4424
		return true;
	}

	return false;
}

4425 4426
static inline bool is_last_gpte(struct kvm_mmu *mmu,
				unsigned level, unsigned gpte)
A
Avi Kivity 已提交
4427
{
4428 4429 4430 4431 4432 4433 4434
	/*
	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
	 * If it is clear, there are no large pages at this level, so clear
	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - mmu->last_nonleaf_level;

4435 4436 4437 4438 4439 4440 4441
	/*
	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;

4442
	return gpte & PT_PAGE_SIZE_MASK;
A
Avi Kivity 已提交
4443 4444
}

4445 4446 4447 4448 4449
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

A
Avi Kivity 已提交
4450 4451 4452 4453 4454 4455 4456 4457
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

4458 4459 4460 4461
static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			struct rsvd_bits_validate *rsvd_check,
			int maxphyaddr, int level, bool nx, bool gbpages,
4462
			bool pse, bool amd)
4463 4464
{
	u64 exb_bit_rsvd = 0;
4465
	u64 gbpages_bit_rsvd = 0;
4466
	u64 nonleaf_bit8_rsvd = 0;
4467

4468
	rsvd_check->bad_mt_xwr = 0;
4469

4470
	if (!nx)
4471
		exb_bit_rsvd = rsvd_bits(63, 63);
4472
	if (!gbpages)
4473
		gbpages_bit_rsvd = rsvd_bits(7, 7);
4474 4475 4476 4477 4478

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
4479
	if (amd)
4480 4481
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

4482
	switch (level) {
4483 4484
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
4485 4486 4487 4488
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4489

4490
		if (!pse) {
4491
			rsvd_check->rsvd_bits_mask[1][1] = 0;
4492 4493 4494
			break;
		}

4495 4496
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
4497
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4498 4499
		else
			/* 32 bits PSE 4MB page */
4500
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4501 4502
		break;
	case PT32E_ROOT_LEVEL:
4503
		rsvd_check->rsvd_bits_mask[0][2] =
4504
			rsvd_bits(maxphyaddr, 63) |
4505
			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
4506
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4507
			rsvd_bits(maxphyaddr, 62);	/* PDE */
4508
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4509
			rsvd_bits(maxphyaddr, 62); 	/* PTE */
4510
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4511 4512
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
4513 4514
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4515
		break;
4516 4517 4518 4519 4520 4521
	case PT64_ROOT_5LEVEL:
		rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
4522
		/* fall through */
4523
	case PT64_ROOT_4LEVEL:
4524 4525
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4526
			rsvd_bits(maxphyaddr, 51);
4527 4528
		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
4529
			rsvd_bits(maxphyaddr, 51);
4530 4531 4532 4533 4534 4535 4536
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
4537
			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
4538
			rsvd_bits(13, 29);
4539
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4540 4541
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
4542 4543
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4544 4545 4546 4547
		break;
	}
}

4548 4549 4550 4551 4552
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
				cpuid_maxphyaddr(vcpu), context->root_level,
4553 4554
				context->nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4555
				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
4556 4557
}

4558 4559 4560
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    int maxphyaddr, bool execonly)
4561
{
4562
	u64 bad_mt_xwr;
4563

4564 4565
	rsvd_check->rsvd_bits_mask[0][4] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4566
	rsvd_check->rsvd_bits_mask[0][3] =
4567
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4568
	rsvd_check->rsvd_bits_mask[0][2] =
4569
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4570
	rsvd_check->rsvd_bits_mask[0][1] =
4571
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4572
	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
4573 4574

	/* large page */
4575
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4576 4577
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] =
4578
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
4579
	rsvd_check->rsvd_bits_mask[1][1] =
4580
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
4581
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4582

4583 4584 4585 4586 4587 4588 4589 4590
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4591
	}
4592
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4593 4594
}

4595 4596 4597 4598 4599 4600 4601
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    cpuid_maxphyaddr(vcpu), execonly);
}

4602 4603 4604 4605 4606 4607 4608 4609
/*
 * the page table on host is the shadow page table for the page
 * table in guest or amd nested guest, its mmu features completely
 * follow the features in guest.
 */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
4610 4611
	bool uses_nx = context->nx ||
		context->mmu_role.base.smep_andnot_wp;
4612 4613
	struct rsvd_bits_validate *shadow_zero_check;
	int i;
4614

4615 4616 4617 4618
	/*
	 * Passing "true" to the last argument is okay; it adds a check
	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
	 */
4619 4620
	shadow_zero_check = &context->shadow_zero_check;
	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4621
				shadow_phys_bits,
4622
				context->shadow_root_level, uses_nx,
4623 4624
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu), true);
4625 4626 4627 4628 4629 4630 4631 4632 4633

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

4634 4635 4636
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

4637 4638 4639 4640 4641 4642
static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

4643 4644 4645 4646 4647 4648 4649 4650
/*
 * the direct page table on host, use as much mmu features as
 * possible, however, kvm currently does not do execution-protection.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
4651 4652 4653 4654 4655
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

4656
	if (boot_cpu_is_amd())
4657
		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4658
					shadow_phys_bits,
4659
					context->shadow_root_level, false,
4660 4661
					boot_cpu_has(X86_FEATURE_GBPAGES),
					true, true);
4662
	else
4663
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4664
					    shadow_phys_bits,
4665 4666
					    false);

4667 4668 4669 4670 4671 4672 4673
	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684
}

/*
 * as the comments in reset_shadow_zero_bits_mask() except it
 * is the shadow page table for intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4685
				    shadow_phys_bits, execonly);
4686 4687
}

4688 4689 4690 4691 4692 4693 4694 4695 4696 4697
#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))


4698 4699
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu, bool ept)
4700
{
4701 4702 4703 4704 4705 4706 4707 4708 4709
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
	bool cr0_wp = is_write_protection(vcpu);
4710 4711

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4712 4713
		unsigned pfec = byte << 1;

F
Feng Wu 已提交
4714
		/*
4715 4716
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
F
Feng Wu 已提交
4717
		 */
4718

4719
		/* Faults from writes to non-writable pages */
4720
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4721
		/* Faults from user mode accesses to supervisor pages */
4722
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4723
		/* Faults from fetches of non-executable pages*/
4724
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
			if (!mmu->nx)
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP:kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
P
Peng Hao 已提交
4750
			 * conditions are true:
4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4764
		}
4765 4766

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4767 4768 4769
	}
}

4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844
/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				bool ept)
{
	unsigned bit;
	bool wp;

	if (ept) {
		mmu->pkru_mask = 0;
		return;
	}

	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_write_protection(vcpu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

4845
static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
A
Avi Kivity 已提交
4846
{
4847 4848 4849 4850 4851
	unsigned root_level = mmu->root_level;

	mmu->last_nonleaf_level = root_level;
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
A
Avi Kivity 已提交
4852 4853
}

4854 4855 4856
static void paging64_init_context_common(struct kvm_vcpu *vcpu,
					 struct kvm_mmu *context,
					 int level)
A
Avi Kivity 已提交
4857
{
4858
	context->nx = is_nx(vcpu);
4859
	context->root_level = level;
4860

4861
	reset_rsvds_bits_mask(vcpu, context);
4862
	update_permission_bitmask(vcpu, context, false);
4863
	update_pkru_bitmask(vcpu, context, false);
4864
	update_last_nonleaf_level(vcpu, context);
A
Avi Kivity 已提交
4865

4866
	MMU_WARN_ON(!is_pae(vcpu));
A
Avi Kivity 已提交
4867 4868
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
4869
	context->sync_page = paging64_sync_page;
M
Marcelo Tosatti 已提交
4870
	context->invlpg = paging64_invlpg;
4871
	context->update_pte = paging64_update_pte;
4872
	context->shadow_root_level = level;
4873
	context->direct_map = false;
A
Avi Kivity 已提交
4874 4875
}

4876 4877
static void paging64_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
4878
{
4879 4880 4881 4882
	int root_level = is_la57_mode(vcpu) ?
			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;

	paging64_init_context_common(vcpu, context, root_level);
4883 4884
}

4885 4886
static void paging32_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
A
Avi Kivity 已提交
4887
{
4888
	context->nx = false;
4889
	context->root_level = PT32_ROOT_LEVEL;
4890

4891
	reset_rsvds_bits_mask(vcpu, context);
4892
	update_permission_bitmask(vcpu, context, false);
4893
	update_pkru_bitmask(vcpu, context, false);
4894
	update_last_nonleaf_level(vcpu, context);
A
Avi Kivity 已提交
4895 4896 4897

	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
4898
	context->sync_page = paging32_sync_page;
M
Marcelo Tosatti 已提交
4899
	context->invlpg = paging32_invlpg;
4900
	context->update_pte = paging32_update_pte;
A
Avi Kivity 已提交
4901
	context->shadow_root_level = PT32E_ROOT_LEVEL;
4902
	context->direct_map = false;
A
Avi Kivity 已提交
4903 4904
}

4905 4906
static void paging32E_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
A
Avi Kivity 已提交
4907
{
4908
	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
A
Avi Kivity 已提交
4909 4910
}

4911 4912 4913 4914
static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_extended_role ext = {0};

4915
	ext.cr0_pg = !!is_paging(vcpu);
4916
	ext.cr4_pae = !!is_pae(vcpu);
4917 4918 4919 4920
	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
	ext.cr4_pse = !!is_pse(vcpu);
	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4921
	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
4922
	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4923 4924 4925 4926 4927 4928

	ext.valid = 1;

	return ext;
}

4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
						   bool base_only)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
	role.base.nxe = !!is_nx(vcpu);
	role.base.cr0_wp = is_write_protection(vcpu);
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);

	if (base_only)
		return role;

	role.ext = kvm_calc_mmu_role_ext(vcpu);

	return role;
}

static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4950
{
4951
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4952

4953 4954 4955
	role.base.ad_disabled = (shadow_accessed_mask == 0);
	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
	role.base.direct = true;
4956
	role.base.gpte_is_8_bytes = true;
4957 4958 4959 4960

	return role;
}

4961
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4962
{
4963
	struct kvm_mmu *context = vcpu->arch.mmu;
4964 4965
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
4966

4967 4968 4969 4970 4971
	new_role.base.word &= mmu_base_role_mask.word;
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
4972
	context->page_fault = tdp_page_fault;
4973
	context->sync_page = nonpaging_sync_page;
M
Marcelo Tosatti 已提交
4974
	context->invlpg = nonpaging_invlpg;
4975
	context->update_pte = nonpaging_update_pte;
4976
	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
4977
	context->direct_map = true;
4978
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
4979
	context->get_cr3 = get_cr3;
4980
	context->get_pdptr = kvm_pdptr_read;
4981
	context->inject_page_fault = kvm_inject_page_fault;
4982 4983

	if (!is_paging(vcpu)) {
4984
		context->nx = false;
4985 4986 4987
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
4988
		context->nx = is_nx(vcpu);
4989 4990
		context->root_level = is_la57_mode(vcpu) ?
				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4991 4992
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4993
	} else if (is_pae(vcpu)) {
4994
		context->nx = is_nx(vcpu);
4995
		context->root_level = PT32E_ROOT_LEVEL;
4996 4997
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
4998
	} else {
4999
		context->nx = false;
5000
		context->root_level = PT32_ROOT_LEVEL;
5001 5002
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
5003 5004
	}

5005
	update_permission_bitmask(vcpu, context, false);
5006
	update_pkru_bitmask(vcpu, context, false);
5007
	update_last_nonleaf_level(vcpu, context);
5008
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
5009 5010
}

5011 5012 5013 5014 5015 5016 5017 5018 5019 5020
static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.smep_andnot_wp = role.ext.cr4_smep &&
		!is_write_protection(vcpu);
	role.base.smap_andnot_wp = role.ext.cr4_smap &&
		!is_write_protection(vcpu);
	role.base.direct = !is_paging(vcpu);
5021
	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
5022 5023

	if (!is_long_mode(vcpu))
5024
		role.base.level = PT32E_ROOT_LEVEL;
5025
	else if (is_la57_mode(vcpu))
5026
		role.base.level = PT64_ROOT_5LEVEL;
5027
	else
5028
		role.base.level = PT64_ROOT_4LEVEL;
5029 5030 5031 5032 5033 5034

	return role;
}

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
5035
	struct kvm_mmu *context = vcpu->arch.mmu;
5036 5037 5038 5039 5040 5041
	union kvm_mmu_role new_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, false);

	new_role.base.word &= mmu_base_role_mask.word;
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;
A
Avi Kivity 已提交
5042 5043

	if (!is_paging(vcpu))
5044
		nonpaging_init_context(vcpu, context);
A
Avi Kivity 已提交
5045
	else if (is_long_mode(vcpu))
5046
		paging64_init_context(vcpu, context);
A
Avi Kivity 已提交
5047
	else if (is_pae(vcpu))
5048
		paging32E_init_context(vcpu, context);
A
Avi Kivity 已提交
5049
	else
5050
		paging32_init_context(vcpu, context);
5051

5052
	context->mmu_role.as_u64 = new_role.as_u64;
5053
	reset_shadow_zero_bits_mask(vcpu, context);
5054 5055 5056
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

5057 5058 5059
static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly)
5060
{
5061
	union kvm_mmu_role role = {0};
5062

5063 5064
	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
5065

5066
	role.base.level = PT64_ROOT_4LEVEL;
5067
	role.base.gpte_is_8_bytes = true;
5068 5069 5070 5071
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;
5072

5073 5074 5075 5076 5077 5078 5079
	/*
	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
	 * SMAP variation to denote shadow EPT entries.
	 */
	role.base.cr0_wp = true;
	role.base.smap_andnot_wp = true;

5080
	role.ext = kvm_calc_mmu_role_ext(vcpu);
5081
	role.ext.execonly = execonly;
5082 5083 5084 5085

	return role;
}

5086
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5087
			     bool accessed_dirty, gpa_t new_eptp)
N
Nadav Har'El 已提交
5088
{
5089
	struct kvm_mmu *context = vcpu->arch.mmu;
5090 5091 5092 5093 5094 5095 5096 5097 5098
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly);

	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);

	new_role.base.word &= mmu_base_role_mask.word;
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;
5099

5100
	context->shadow_root_level = PT64_ROOT_4LEVEL;
N
Nadav Har'El 已提交
5101 5102

	context->nx = true;
5103
	context->ept_ad = accessed_dirty;
N
Nadav Har'El 已提交
5104 5105 5106 5107 5108
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->update_pte = ept_update_pte;
5109
	context->root_level = PT64_ROOT_4LEVEL;
N
Nadav Har'El 已提交
5110
	context->direct_map = false;
5111
	context->mmu_role.as_u64 = new_role.as_u64;
5112

N
Nadav Har'El 已提交
5113
	update_permission_bitmask(vcpu, context, true);
5114
	update_pkru_bitmask(vcpu, context, true);
5115
	update_last_nonleaf_level(vcpu, context);
N
Nadav Har'El 已提交
5116
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
5117
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
N
Nadav Har'El 已提交
5118 5119 5120
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

5121
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
5122
{
5123
	struct kvm_mmu *context = vcpu->arch.mmu;
5124 5125 5126 5127 5128 5129

	kvm_init_shadow_mmu(vcpu);
	context->set_cr3           = kvm_x86_ops->set_cr3;
	context->get_cr3           = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
A
Avi Kivity 已提交
5130 5131
}

5132
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
5133
{
5134
	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
5135 5136
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

5137 5138 5139 5140 5141
	new_role.base.word &= mmu_base_role_mask.word;
	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
5142
	g_context->get_cr3           = get_cr3;
5143
	g_context->get_pdptr         = kvm_pdptr_read;
5144 5145 5146
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
5147
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5148 5149 5150 5151 5152
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5153 5154
	 */
	if (!is_paging(vcpu)) {
5155
		g_context->nx = false;
5156 5157 5158
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
5159
		g_context->nx = is_nx(vcpu);
5160 5161
		g_context->root_level = is_la57_mode(vcpu) ?
					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
5162
		reset_rsvds_bits_mask(vcpu, g_context);
5163 5164
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
5165
		g_context->nx = is_nx(vcpu);
5166
		g_context->root_level = PT32E_ROOT_LEVEL;
5167
		reset_rsvds_bits_mask(vcpu, g_context);
5168 5169
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
5170
		g_context->nx = false;
5171
		g_context->root_level = PT32_ROOT_LEVEL;
5172
		reset_rsvds_bits_mask(vcpu, g_context);
5173 5174 5175
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

5176
	update_permission_bitmask(vcpu, g_context, false);
5177
	update_pkru_bitmask(vcpu, g_context, false);
5178
	update_last_nonleaf_level(vcpu, g_context);
5179 5180
}

5181
void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
5182
{
5183
	if (reset_roots) {
5184 5185
		uint i;

5186
		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
5187 5188

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5189
			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5190 5191
	}

5192
	if (mmu_is_nested(vcpu))
5193
		init_kvm_nested_mmu(vcpu);
5194
	else if (tdp_enabled)
5195
		init_kvm_tdp_mmu(vcpu);
5196
	else
5197
		init_kvm_softmmu(vcpu);
5198
}
5199
EXPORT_SYMBOL_GPL(kvm_init_mmu);
5200

5201 5202 5203
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
5204 5205
	union kvm_mmu_role role;

5206
	if (tdp_enabled)
5207
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
5208
	else
5209 5210 5211
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);

	return role.base;
5212
}
5213

5214
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
5215
{
5216
	kvm_mmu_unload(vcpu);
5217
	kvm_init_mmu(vcpu, true);
A
Avi Kivity 已提交
5218
}
5219
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
A
Avi Kivity 已提交
5220 5221

int kvm_mmu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
5222
{
5223 5224
	int r;

5225
	r = mmu_topup_memory_caches(vcpu);
A
Avi Kivity 已提交
5226 5227
	if (r)
		goto out;
5228
	r = mmu_alloc_roots(vcpu);
5229
	kvm_mmu_sync_roots(vcpu);
5230 5231
	if (r)
		goto out;
5232
	kvm_mmu_load_cr3(vcpu);
5233
	kvm_x86_ops->tlb_flush(vcpu, true);
5234 5235
out:
	return r;
A
Avi Kivity 已提交
5236
}
A
Avi Kivity 已提交
5237 5238 5239 5240
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
5241 5242 5243 5244
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
A
Avi Kivity 已提交
5245
}
5246
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
A
Avi Kivity 已提交
5247

5248
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
5249 5250
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
5251
{
5252
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
5253 5254
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
5255
        }
5256

A
Avi Kivity 已提交
5257
	++vcpu->kvm->stat.mmu_pte_updated;
5258
	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
5259 5260
}

5261 5262 5263 5264 5265 5266 5267 5268
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
5269 5270
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
5271 5272 5273
	return (old & ~new & PT64_PERM_MASK) != 0;
}

5274
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5275
				    int *bytes)
5276
{
5277
	u64 gentry = 0;
5278
	int r;
5279 5280 5281

	/*
	 * Assume that the pte write on a page table of the same type
5282 5283
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
5284
	 */
5285
	if (is_pae(vcpu) && *bytes == 4) {
5286
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5287 5288
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
5289 5290
	}

5291 5292 5293 5294
	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
5295 5296
	}

5297 5298 5299 5300 5301 5302 5303
	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
5304
static bool detect_write_flooding(struct kvm_mmu_page *sp)
5305
{
5306 5307 5308 5309
	/*
	 * Skip write-flooding detected for the sp whose level is 1, because
	 * it can become unsync, then the guest page is not write-protected.
	 */
5310
	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
5311
		return false;
5312

5313 5314
	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
5330
	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5331 5332 5333 5334 5335 5336 5337 5338

	/*
	 * Sometimes, the OS only writes the last one bytes to update status
	 * bits, for example, in linux, andb instruction is used in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353
	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
5354
	if (!sp->role.gpte_is_8_bytes) {
5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

5376
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5377 5378
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
5379 5380 5381 5382 5383 5384
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
5385
	bool remote_flush, local_flush;
5386 5387 5388 5389 5390

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
5391
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5392 5393
		return;

5394
	remote_flush = local_flush = false;
5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether allocation memory is successful
	 * or not since pte prefetch is skiped if it does not have
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
5406 5407 5408

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

5409
	++vcpu->kvm->stat.mmu_pte_write;
5410
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5411

5412
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5413
		if (detect_write_misaligned(sp, gpa, bytes) ||
5414
		      detect_write_flooding(sp)) {
5415
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
A
Avi Kivity 已提交
5416
			++vcpu->kvm->stat.mmu_flooded;
5417 5418
			continue;
		}
5419 5420 5421 5422 5423

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

5424
		local_flush = true;
5425
		while (npte--) {
5426 5427
			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;

5428
			entry = *spte;
5429
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
5430
			if (gentry &&
5431
			      !((sp->role.word ^ base_role)
5432
			      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
5433
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
G
Gleb Natapov 已提交
5434
			if (need_remote_flush(entry, *spte))
5435
				remote_flush = true;
5436
			++spte;
5437 5438
		}
	}
5439
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5440
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5441
	spin_unlock(&vcpu->kvm->mmu_lock);
5442 5443
}

5444 5445
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
5446 5447
	gpa_t gpa;
	int r;
5448

5449
	if (vcpu->arch.mmu->direct_map)
5450 5451
		return 0;

5452
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
5453 5454

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
5455

5456
	return r;
5457
}
5458
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
5459

5460
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5461
		       void *insn, int insn_len)
5462
{
5463
	int r, emulation_type = 0;
5464
	bool direct = vcpu->arch.mmu->direct_map;
5465

5466
	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5467 5468
		return RET_PF_RETRY;

5469
	/* With shadow page tables, fault_address contains a GVA or nGPA.  */
5470
	if (vcpu->arch.mmu->direct_map) {
5471
		vcpu->arch.gpa_available = true;
5472
		vcpu->arch.gpa_val = cr2_or_gpa;
5473
	}
5474

5475
	r = RET_PF_INVALID;
5476
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5477
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5478
		if (r == RET_PF_EMULATE)
5479 5480
			goto emulate;
	}
5481

5482
	if (r == RET_PF_INVALID) {
5483
		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
5484 5485
					       lower_32_bits(error_code),
					       false);
5486 5487 5488 5489 5490
		WARN_ON(r == RET_PF_INVALID);
	}

	if (r == RET_PF_RETRY)
		return 1;
5491
	if (r < 0)
5492
		return r;
5493

5494 5495 5496 5497 5498 5499 5500
	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
5501
	if (vcpu->arch.mmu->direct_map &&
5502
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5503
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5504 5505 5506
		return 1;
	}

5507 5508 5509 5510 5511 5512
	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
5513 5514 5515 5516
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5517
	 */
5518
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5519
		emulation_type = EMULTYPE_ALLOW_RETRY;
5520
emulate:
5521 5522 5523 5524 5525
	/*
	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
	 * This can happen if a guest gets a page-fault on data access but the HW
	 * table walker is not able to read the instruction page (e.g instruction
	 * page is not present in memory). In those cases we simply restart the
5526
	 * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
5527
	 */
5528 5529 5530 5531
	if (unlikely(insn && !insn_len)) {
		if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
			return 1;
	}
5532

5533
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5534
				       insn_len);
5535 5536 5537
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

M
Marcelo Tosatti 已提交
5538 5539
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
5540
	struct kvm_mmu *mmu = vcpu->arch.mmu;
5541
	int i;
5542

5543 5544 5545 5546
	/* INVLPG on a * non-canonical address is a NOP according to the SDM.  */
	if (is_noncanonical_address(gva, vcpu))
		return;

5547
	mmu->invlpg(vcpu, gva, mmu->root_hpa);
5548 5549 5550 5551

	/*
	 * INVLPG is required to invalidate any global mappings for the VA,
	 * irrespective of PCID. Since it would take us roughly similar amount
5552 5553 5554
	 * of work to determine whether any of the prev_root mappings of the VA
	 * is marked global, or to just sync it blindly, so we might as well
	 * just always sync it.
5555
	 *
5556 5557 5558
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
5559
	 */
5560 5561 5562
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (VALID_PAGE(mmu->prev_roots[i].hpa))
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5563

5564
	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
M
Marcelo Tosatti 已提交
5565 5566 5567 5568
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

5569 5570
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
5571
	struct kvm_mmu *mmu = vcpu->arch.mmu;
5572
	bool tlb_flush = false;
5573
	uint i;
5574 5575

	if (pcid == kvm_get_active_pcid(vcpu)) {
5576
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5577
		tlb_flush = true;
5578 5579
	}

5580 5581 5582 5583 5584 5585
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
5586
	}
5587

5588 5589 5590
	if (tlb_flush)
		kvm_x86_ops->tlb_flush_gva(vcpu, gva);

5591 5592 5593
	++vcpu->stat.invlpg;

	/*
5594 5595 5596
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
5597 5598 5599 5600
	 */
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);

5601 5602 5603 5604 5605 5606
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

5607 5608 5609 5610 5611 5612
void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632

/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
5633 5634 5635
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
5636 5637 5638 5639 5640 5641 5642
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
5643 5644
		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
						   end_gfn - start_gfn + 1);
5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}

5686
static void free_mmu_pages(struct kvm_mmu *mmu)
A
Avi Kivity 已提交
5687
{
5688 5689
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->lm_root);
A
Avi Kivity 已提交
5690 5691
}

5692
static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
A
Avi Kivity 已提交
5693
{
5694
	struct page *page;
A
Avi Kivity 已提交
5695 5696
	int i;

5697
	/*
5698 5699 5700 5701 5702 5703 5704
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
	 * skip allocating the PDP table.
5705
	 */
5706 5707 5708
	if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

5709
	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5710
	if (!page)
5711 5712
		return -ENOMEM;

5713
	mmu->pae_root = page_address(page);
5714
	for (i = 0; i < 4; ++i)
5715
		mmu->pae_root[i] = INVALID_PAGE;
5716

A
Avi Kivity 已提交
5717 5718 5719
	return 0;
}

5720
int kvm_mmu_create(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
5721
{
5722
	uint i;
5723
	int ret;
5724

5725 5726
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
A
Avi Kivity 已提交
5727

5728
	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
5729
	vcpu->arch.root_mmu.root_cr3 = 0;
5730
	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
5731
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5732
		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
A
Avi Kivity 已提交
5733

5734
	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
5735
	vcpu->arch.guest_mmu.root_cr3 = 0;
5736 5737 5738
	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5739

5740
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753

	ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
A
Avi Kivity 已提交
5754 5755
}

5756
#define BATCH_ZAP_PAGES	10
5757 5758 5759
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
5760
	int nr_zapped, batch = 0;
5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
5773 5774 5775 5776
		 * Skip invalid pages with a non-zero root count, zapping pages
		 * with a non-zero root count will never succeed, i.e. the page
		 * will get thrown back on active_mmu_pages and we'll get stuck
		 * in an infinite loop.
5777
		 */
5778
		if (sp->role.invalid && sp->root_count)
5779 5780
			continue;

5781 5782 5783 5784 5785 5786
		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUS have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
5787
		if (batch >= BATCH_ZAP_PAGES &&
5788
		    cond_resched_lock(&kvm->mmu_lock)) {
5789
			batch = 0;
5790 5791 5792
			goto restart;
		}

5793 5794
		if (__kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5795
			batch += nr_zapped;
5796
			goto restart;
5797
		}
5798 5799
	}

5800 5801 5802 5803 5804
	/*
	 * Trigger a remote TLB flush before freeing the page tables to ensure
	 * KVM is not in the middle of a lockless shadow page table walk, which
	 * may reference the pages.
	 */
5805
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It's required when memslot is being deleted or VM is being
 * destroyed, in these cases, we should ensure that KVM MMU does
 * not use any resource of the being-deleted slot or all slots
 * after calling the function.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
5819 5820
	lockdep_assert_held(&kvm->slots_lock);

5821
	spin_lock(&kvm->mmu_lock);
5822
	trace_kvm_mmu_zap_all_fast(kvm);
5823 5824 5825 5826 5827 5828 5829 5830 5831

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5832

5833 5834 5835 5836 5837 5838 5839 5840 5841 5842
	/*
	 * Notify all vcpus to reload its shadow page table and flush TLB.
	 * Then all vcpus will switch to new shadow page table with the new
	 * mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock,
	 * otherwise, vcpu would purge shadow page but miss tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

5843 5844 5845 5846
	kvm_zap_obsolete_pages(kvm);
	spin_unlock(&kvm->mmu_lock);
}

5847 5848 5849 5850 5851
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

5852
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5853 5854
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
5855
{
5856
	kvm_mmu_zap_all_fast(kvm);
5857 5858
}

5859
void kvm_mmu_init_vm(struct kvm *kvm)
5860
{
5861
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5862

5863
	node->track_write = kvm_mmu_pte_write;
5864
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5865
	kvm_page_track_register_notifier(kvm, node);
5866 5867
}

5868
void kvm_mmu_uninit_vm(struct kvm *kvm)
5869
{
5870
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5871

5872
	kvm_page_track_unregister_notifier(kvm, node);
5873 5874
}

X
Xiao Guangrong 已提交
5875 5876 5877 5878
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
5879
	int i;
X
Xiao Guangrong 已提交
5880 5881

	spin_lock(&kvm->mmu_lock);
5882 5883 5884 5885 5886 5887 5888 5889 5890
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			gfn_t start, end;

			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;
X
Xiao Guangrong 已提交
5891

5892 5893 5894
			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
						start, end - 1, true);
5895
		}
X
Xiao Guangrong 已提交
5896 5897 5898 5899 5900
	}

	spin_unlock(&kvm->mmu_lock);
}

5901 5902
static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head)
5903
{
5904
	return __rmap_write_protect(kvm, rmap_head, false);
5905 5906
}

5907 5908
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot)
A
Avi Kivity 已提交
5909
{
5910
	bool flush;
A
Avi Kivity 已提交
5911

5912
	spin_lock(&kvm->mmu_lock);
5913 5914
	flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
				      false);
5915
	spin_unlock(&kvm->mmu_lock);
5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930

	/*
	 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
	 * which do tlb flush out of mmu-lock should be serialized by
	 * kvm->slots_lock otherwise tlb flush would be missed.
	 */
	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * We can flush all the TLBs out of the mmu lock without TLB
	 * corruption since we just change the spte from writable to
	 * readonly so that we only need to care the case of changing
	 * spte from present to present (changing the spte from present
	 * to nonpresent will flush all the TLBs immediately), in other
	 * words, the only case we care is mmu_spte_update() where we
W
Wei Yang 已提交
5931
	 * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
5932 5933 5934
	 * instead of PT_WRITABLE_MASK, that means it does not depend
	 * on PT_WRITABLE_MASK anymore.
	 */
5935
	if (flush)
5936 5937
		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
			memslot->npages);
A
Avi Kivity 已提交
5938
}
5939

5940
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5941
					 struct kvm_rmap_head *rmap_head)
5942 5943 5944 5945
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
D
Dan Williams 已提交
5946
	kvm_pfn_t pfn;
5947 5948
	struct kvm_mmu_page *sp;

5949
restart:
5950
	for_each_rmap_spte(rmap_head, &iter, sptep) {
5951 5952 5953 5954
		sp = page_header(__pa(sptep));
		pfn = spte_to_pfn(*sptep);

		/*
5955 5956 5957 5958 5959
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table is using 4K page size
		 * mapping if the indirect sp has level = 1.
5960
		 */
5961 5962 5963
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    !kvm_is_zone_device_pfn(pfn) &&
		    PageTransCompoundMap(pfn_to_page(pfn))) {
5964
			pte_list_remove(rmap_head, sptep);
5965 5966 5967 5968 5969 5970 5971

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

5972 5973
			goto restart;
		}
5974 5975 5976 5977 5978 5979
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5980
				   const struct kvm_memory_slot *memslot)
5981
{
5982
	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5983
	spin_lock(&kvm->mmu_lock);
5984 5985
	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
			 kvm_mmu_zap_collapsible_spte, true);
5986 5987 5988
	spin_unlock(&kvm->mmu_lock);
}

5989 5990 5991
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
5992
	bool flush;
5993 5994

	spin_lock(&kvm->mmu_lock);
5995
	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
6007 6008
		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
				memslot->npages);
6009 6010 6011 6012 6013 6014
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
6015
	bool flush;
6016 6017

	spin_lock(&kvm->mmu_lock);
6018 6019
	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
					false);
6020 6021 6022 6023 6024 6025
	spin_unlock(&kvm->mmu_lock);

	/* see kvm_mmu_slot_remove_write_access */
	lockdep_assert_held(&kvm->slots_lock);

	if (flush)
6026 6027
		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
				memslot->npages);
6028 6029 6030 6031 6032 6033
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot)
{
6034
	bool flush;
6035 6036

	spin_lock(&kvm->mmu_lock);
6037
	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
6038 6039 6040 6041 6042 6043
	spin_unlock(&kvm->mmu_lock);

	lockdep_assert_held(&kvm->slots_lock);

	/* see kvm_mmu_slot_leaf_clear_dirty */
	if (flush)
6044 6045
		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
				memslot->npages);
6046 6047 6048
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

6049
void kvm_mmu_zap_all(struct kvm *kvm)
6050 6051
{
	struct kvm_mmu_page *sp, *node;
6052
	LIST_HEAD(invalid_list);
6053
	int ign;
6054

6055
	spin_lock(&kvm->mmu_lock);
6056
restart:
6057
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6058
		if (sp->role.invalid && sp->root_count)
6059
			continue;
6060
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
6061
			goto restart;
6062
		if (cond_resched_lock(&kvm->mmu_lock))
6063 6064 6065
			goto restart;
	}

6066
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
6067 6068 6069
	spin_unlock(&kvm->mmu_lock);
}

6070
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
6071
{
6072
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
6073

6074
	gen &= MMIO_SPTE_GEN_MASK;
6075

6076
	/*
6077 6078 6079 6080 6081 6082 6083 6084
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

6085
	/*
6086
	 * The very rare case: if the MMIO generation number has wrapped,
6087 6088
	 * zap all shadow pages.
	 */
6089
	if (unlikely(gen == 0)) {
6090
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
6091
		kvm_mmu_zap_all_fast(kvm);
6092
	}
6093 6094
}

6095 6096
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
6097 6098
{
	struct kvm *kvm;
6099
	int nr_to_scan = sc->nr_to_scan;
6100
	unsigned long freed = 0;
6101

J
Junaid Shahid 已提交
6102
	mutex_lock(&kvm_lock);
6103 6104

	list_for_each_entry(kvm, &vm_list, vm_list) {
6105
		int idx;
6106
		LIST_HEAD(invalid_list);
6107

6108 6109 6110 6111 6112 6113 6114 6115
		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
6116 6117 6118 6119 6120 6121
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance errorneosly, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
6122 6123
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
6124 6125
			continue;

6126
		idx = srcu_read_lock(&kvm->srcu);
6127 6128
		spin_lock(&kvm->mmu_lock);

6129 6130 6131 6132 6133 6134
		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

6135 6136
		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			freed++;
6137
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
6138

6139
unlock:
6140
		spin_unlock(&kvm->mmu_lock);
6141
		srcu_read_unlock(&kvm->srcu, idx);
6142

6143 6144 6145 6146 6147
		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
6148 6149
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
6150 6151
	}

J
Junaid Shahid 已提交
6152
	mutex_unlock(&kvm_lock);
6153 6154 6155 6156 6157 6158
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
6159
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6160 6161 6162
}

static struct shrinker mmu_shrinker = {
6163 6164
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
6165 6166 6167
	.seeks = DEFAULT_SEEKS * 10,
};

I
Ingo Molnar 已提交
6168
static void mmu_destroy_caches(void)
6169
{
6170 6171
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
6172 6173
}

6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195
static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;

	/*
	 * Set the reserved bits and the present bit of an paging-structure
	 * entry to generate page fault with PFER.RSV = 1.
	 */

	/*
	 * Mask the uppermost physical address bit, which would be reserved as
	 * long as the supported physical address width is less than 52.
	 */
	mask = 1ull << 51;

	/* Set the present bit. */
	mask |= 1ull;

	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
6196
	if (shadow_phys_bits == 52)
6197 6198
		mask &= ~1ull;

6199
	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
6200 6201
}

P
Paolo Bonzini 已提交
6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235
static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
6236
			mutex_lock(&kvm->slots_lock);
P
Paolo Bonzini 已提交
6237
			kvm_mmu_zap_all_fast(kvm);
6238
			mutex_unlock(&kvm->slots_lock);
6239 6240

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
P
Paolo Bonzini 已提交
6241 6242 6243 6244 6245 6246 6247
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}

6248 6249
int kvm_mmu_module_init(void)
{
6250 6251
	int ret = -ENOMEM;

P
Paolo Bonzini 已提交
6252 6253 6254
	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());

6255 6256 6257 6258 6259 6260 6261 6262 6263 6264
	/*
	 * MMU roles use union aliasing which is, generally speaking, an
	 * undefined behavior. However, we supposedly know how compilers behave
	 * and the current status quo is unlikely to change. Guardians below are
	 * supposed to let us know if the assumption becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

6265
	kvm_mmu_reset_all_pte_masks();
6266

6267 6268
	kvm_set_mmio_spte_mask();

6269 6270
	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
6271
					    0, SLAB_ACCOUNT, NULL);
6272
	if (!pte_list_desc_cache)
6273
		goto out;
6274

6275 6276
	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
6277
						  0, SLAB_ACCOUNT, NULL);
6278
	if (!mmu_page_header_cache)
6279
		goto out;
6280

6281
	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6282
		goto out;
6283

6284 6285 6286
	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;
6287

6288 6289
	return 0;

6290
out:
6291
	mmu_destroy_caches();
6292
	return ret;
6293 6294
}

6295
/*
P
Peng Hao 已提交
6296
 * Calculate mmu pages needed for kvm.
6297
 */
6298
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6299
{
6300 6301
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
6302
	struct kvm_memslots *slots;
6303
	struct kvm_memory_slot *memslot;
6304
	int i;
6305

6306 6307
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
6308

6309 6310 6311
		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}
6312 6313

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6314
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6315 6316 6317 6318

	return nr_mmu_pages;
}

6319 6320
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
6321
	kvm_mmu_unload(vcpu);
6322 6323
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
6324
	mmu_free_memory_caches(vcpu);
6325 6326 6327 6328 6329 6330 6331
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
6332 6333
	mmu_audit_disable();
}
6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
		WARN_ON_ONCE(sp->lpage_disallowed);

		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			if (to_zap)
				cond_resched_lock(&kvm->mmu_lock);
		}
	}

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}